input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
if the sample set used to be "amps_on_chef" and now is not
if libraryPrepInstrumentData_obj and not libraryPrepType:
logger.debug(
"views - GOING TO DELETE orig_sampleSet orig_sampleSet.libraryPrepInstrumentData.id=%d"
% (orig_sampleSet.libraryPrepInstrumentData.id)
)
libraryPrepInstrumentData_obj.delete()
if sampleSetStatus == "libPrep_pending":
sampleSetStatus = "created"
elif libraryPrepType and not libraryPrepInstrumentData_obj:
libraryPrepInstrumentData_obj = SamplePrepData.objects.create(
samplePrepDataType="lib_prep"
)
logger.debug(
"views - orig_sampleSet.id=%d; GOING TO ADD libraryPrepInstrumentData_obj.id=%d"
% (orig_sampleSet.id, libraryPrepInstrumentData_obj.id)
)
if sampleSetStatus == "created":
sampleSetStatus = "libPrep_pending"
sampleSet_kwargs = {
"displayedName": sampleSetName,
"description": sampleSetDesc,
"SampleGroupType_CV_id": sampleSet_groupType_id,
"libraryPrepType": libraryPrepType,
"libraryPrepKitName": libraryPrepKitName,
"pcrPlateSerialNum": pcrPlateSerialNum,
"additionalCycles": additionalCycles,
"cyclingProtocols": cyclingProtocols,
"libraryPrepInstrument": libraryPrepInstrument,
"libraryPrepInstrumentData": libraryPrepInstrumentData_obj,
"status": sampleSetStatus,
"lastModifiedUser": user,
"lastModifiedDate": currentDateTime,
}
for field, value in sampleSet_kwargs.items():
setattr(orig_sampleSet, field, value)
orig_sampleSet.save()
logger.debug(
"views.save_sampleset - UPDATED sampleSet.id=%d"
% (orig_sampleSet.id)
)
if "edit_amp_sampleSet" in queryDict:
return True
return HttpResponse("true")
except Exception:
logger.exception(format_exc())
# return HttpResponse(json.dumps({"status": "Error saving sample set info to database!"}, cls=LazyJSONEncoder), mimetype="text/html")
message = i18n_errors.fatal_internalerror_during_save(
_SampleSet.verbose_name
) # "Cannot save sample set to database. "
if settings.DEBUG:
message += format_exc()
return HttpResponse(
json.dumps([message], cls=LazyJSONEncoder),
mimetype="application/json",
)
else:
return HttpResponse(
json.dumps(
[
i18n_errors.validationerrors_cannot_save(
_SampleSet.verbose_name, include_error_prefix=True
)
],
cls=LazyJSONEncoder,
),
mimetype="application/json",
)
else:
return HttpResponseRedirect("/sample/")
@transaction.commit_manually
def save_samplesetitem(request):
    """
    Create or edit a sample set item (POST); redirect to /sample/ otherwise.

    Dispatches on the POSTed ``intent``:
      - ``add``: not yet supported (logged only)
      - ``edit``: persists changes to an existing SampleSetItem and its sample
      - ``add_pending`` / ``edit_pending``: stages the item in the session
        until the sample set itself is saved

    Runs under manual transaction management, so every return path must either
    commit or roll back.
    """

    def rollback_and_return_error(errorMessage):
        # Roll back the open transaction and return the error(s) as a JSON list.
        if not isinstance(errorMessage, list):
            errorMessage = [errorMessage]
        transaction.rollback()
        return HttpResponse(
            json.dumps(errorMessage, cls=LazyJSONEncoder), mimetype="application/json"
        )

    if request.method == "POST":
        queryDict = request.POST.dict()
        intent = queryDict.get("intent")
        logger.debug(
            "POST %s save_input_samples_for_sampleset queryDict=%s"
            % (intent, queryDict)
        )
        sampleSetItem_dict = views_helper.parse_sample_kwargs_from_dict(queryDict)

        # validate sampleSetItem parameters
        isValid, errorMessage = sample_validator.validate_sample_for_sampleSet(
            sampleSetItem_dict
        )
        if not isValid:
            return rollback_and_return_error(errorMessage)

        # next validate sampleSetItems as a group: collect the sibling items
        # either from the DB (editing a persisted item) or from the session
        # (editing a pending item)
        samplesetitem_id = queryDict.get("id")
        samplesetitems = None
        if samplesetitem_id:
            item = SampleSetItem.objects.get(pk=samplesetitem_id)
            samplesetitems = item.sampleSet.samples.all()
            item_id = samplesetitem_id
            sampleSet = item.sampleSet
        elif "pending_sampleSetItem_list" in request.session.get("input_samples", {}):
            samplesetitems = request.session["input_samples"][
                "pending_sampleSetItem_list"
            ]
            item_id = queryDict.get("pending_id")
            sampleSet = None

        if samplesetitems:
            # validate barcoding is consistent between multiple samples
            barcodeKit = queryDict.get("barcodeKit")
            barcode = queryDict.get("barcode")
            isValid, errorMessage = sample_validator.validate_barcoding_samplesetitems(
                samplesetitems, barcodeKit, barcode, item_id
            )
            if not isValid:
                return rollback_and_return_error(errorMessage)

            # validate PCR Plate position
            pcrPlateRow = queryDict.get('pcrPlateRow', "")
            isValid, errorMessage = sample_validator.validate_pcrPlate_position_samplesetitems(
                samplesetitems, pcrPlateRow, item_id, sampleSet
            )
            if not isValid:
                return rollback_and_return_error(errorMessage)

        try:
            if intent == "add":
                logger.info("views.save_samplesetitem - TODO!!! - unsupported for now")
            elif intent == "edit":
                sampleSetItem_id = queryDict.get("id")
                new_sample = views_helper._create_or_update_sample_for_sampleSetItem(
                    sampleSetItem_dict, request.user, sampleSetItem_id
                )
                # process custom sample attributes, if any
                isValid, errorMessage = views_helper._create_or_update_sampleAttributes_for_sampleSetItem(
                    request, request.user, new_sample
                )
                if not isValid:
                    return rollback_and_return_error(errorMessage)
                views_helper._create_or_update_sampleSetItem(
                    sampleSetItem_dict, request.user, sampleSetItem_id, None, new_sample
                )
            elif intent == "add_pending" or intent == "edit_pending":
                # process custom sample attributes, if any
                isValid, errorMessage, sampleAttributes_dict = views_helper._create_pending_sampleAttributes_for_sampleSetItem(
                    request
                )
                if errorMessage:
                    # BUGFIX: this path previously returned a bare tuple (not
                    # an HttpResponse) and left the transaction open.
                    return rollback_and_return_error(errorMessage)
                sampleSetItem_pendingId = queryDict.get("pending_id")
                if not sampleSetItem_pendingId:
                    sampleSetItem_pendingId = views_helper._get_pending_sampleSetItem_id(
                        request
                    )
                # create sampleSetItem dict and stage it in the session
                sampleSetItem_dict["pending_id"] = int(sampleSetItem_pendingId)
                sampleSetItem_dict["attribute_dict"] = sampleAttributes_dict
                isNew = intent == "add_pending"
                views_helper._update_input_samples_session_context(
                    request, sampleSetItem_dict, isNew
                )
            transaction.commit()
            return HttpResponse("true")
        except Exception:
            # consistency with sibling views: log with traceback
            logger.exception(format_exc())
            errorMessage = "Error saving sample"
            if settings.DEBUG:
                errorMessage += format_exc()
            return rollback_and_return_error(errorMessage)
    else:
        return HttpResponseRedirect("/sample/")
@transaction.commit_manually
def save_input_samples_for_sampleset(request):
    """
    Create or update SampleSet(s) with manually entered samples (POST);
    redirect to /sample/ otherwise.

    Reads the pending sample items staged in the session, validates them as a
    group against the selected sample set(s), then persists samples, custom
    attributes and SampleSetItems. Runs under manual transaction management,
    so every return path must either commit or roll back.
    """

    def rollback_and_return_error(errorMessage):
        # Roll back the open transaction and return the error(s) as a JSON list.
        if not isinstance(errorMessage, list):
            errorMessage = [errorMessage]
        transaction.rollback()
        return HttpResponse(
            json.dumps(errorMessage, cls=LazyJSONEncoder), mimetype="application/json"
        )

    if request.method == "POST":
        queryDict = request.POST.dict()
        if (
            "input_samples" not in request.session
            and "edit_amp_sampleSet" not in queryDict
        ):
            errorMessage = (
                "No manually entered samples found to create a sample set."
            )  # TODO: i18n
            return rollback_and_return_error(errorMessage)

        sampleSet_ids = request.POST.getlist("sampleset", [])
        logger.debug(
            "POST save_input_samples_for_sampleset queryDict=%s, samplesets=%s"
            % (queryDict, sampleSet_ids)
        )
        if "pending_sampleSetItem_list" in request.session.get("input_samples", {}):
            pending_sampleSetItem_list = request.session["input_samples"][
                "pending_sampleSetItem_list"
            ]
        else:
            pending_sampleSetItem_list = []

        try:
            # create new sample set, if any
            isEdit_amp_sampleSet = False
            if "edit_amp_sampleSet" in queryDict:
                isEdit_amp_sampleSet = True
                # BUGFIX: the result of the nested save was previously
                # discarded (assigned and immediately overwritten); propagate
                # validation/save errors instead of silently continuing.
                # save_sampleset() returns True on success for this path,
                # otherwise an HttpResponse carrying the error payload.
                saveResult = save_sampleset(request)
                if saveResult is not True:
                    transaction.rollback()
                    return saveResult
            isValid, errorMessage, new_sampleSet_id = views_helper._get_or_create_sampleSet(
                queryDict, request.user
            )
            if not isValid:
                return rollback_and_return_error(errorMessage)
            if new_sampleSet_id:
                sampleSet_ids.append(new_sampleSet_id)

            # must select at least one sampleSet to process
            if not sampleSet_ids:
                transaction.rollback()
                return HttpResponse(
                    json.dumps(
                        [
                            validation.required_error(
                                ugettext(
                                    "samplesets.input_samples.save.fields.sampleset.label"
                                ),
                                include_error_prefix=True,
                            )
                        ],
                        cls=LazyJSONEncoder,
                    ),
                    mimetype="application/json",
                )  # "Error, Please select a sample set or add a new sample set first."

            # validate for Ampliseq HD on chef and assign PCR plate and tube position automatically
            isValid, errorMessage = sample_validator.validate_sampleset_items_limit(
                pending_sampleSetItem_list, sampleSet_ids
            )
            if not isValid:
                return rollback_and_return_error(errorMessage)

            # validate new and existing sample set items as a group
            isValid, errorMessage, categoryDict, parsedSamplesetitems = views_helper.validate_for_existing_samples(
                pending_sampleSetItem_list, sampleSet_ids, isEdit_amp_sampleSet
            )
            if not isValid:
                return rollback_and_return_error(errorMessage)
            if categoryDict:
                pending_sampleSetItem_list = views_helper.assign_tube_postion_pcr_plates(
                    categoryDict
                )
            """ TS-17723, TS-17910:Allow user to manually assign the PCR plate for Ampliseq on Chef and manual libPrep Type
            else:
                pending_sampleSetItem_list = views_helper.assign_pcr_plate_rows(
                    parsedSamplesetitems
                )
            """

            # create SampleSetItems from pending list
            for pending_sampleSetItem_dict in pending_sampleSetItem_list:
                new_sample = None
                if type(pending_sampleSetItem_dict) != types.DictType:  #
                    pending_sampleSetItem_dict = model_to_dict(
                        pending_sampleSetItem_dict
                    )
                new_sample = views_helper._create_or_update_sample(
                    pending_sampleSetItem_dict
                )
                sampleAttribute_dict = (
                    pending_sampleSetItem_dict.get("attribute_dict") or {}
                )
                isValid, errorMessage = views_helper._create_or_update_sampleAttributes_for_sampleSetItem_with_dict(
                    request, request.user, new_sample, sampleAttribute_dict
                )
                if not isValid:
                    return rollback_and_return_error(errorMessage)
                itemID = pending_sampleSetItem_dict.get("id", None)
                # one item may be attached to several selected sample sets
                for sampleSet_id in sampleSet_ids:
                    views_helper._create_or_update_sampleSetItem(
                        pending_sampleSetItem_dict,
                        request.user,
                        itemID,
                        sampleSet_id,
                        new_sample,
                    )
            clear_samplesetitem_session(request)
            transaction.commit()
            return HttpResponse("true")
        except Exception:
            logger.exception(format_exc())
            transaction.rollback()
            errorMessage = ugettext_lazy(
                "samplesets.input_samples.save.error"
            )  # "Error saving manually entered sample set info to database. "
            if settings.DEBUG:
                errorMessage += format_exc()
            return rollback_and_return_error(errorMessage)
    else:
        return HttpResponseRedirect("/sample/")
def clear_input_samples_for_sampleset(request):
    """Discard the pending manually-entered samples held in the session and
    redirect back to the input-samples page."""
    clear_samplesetitem_session(request)
    return HttpResponseRedirect("/sample/samplesetitem/input/")
"""
Get the saved samplesetitem for editing during sample set edit
Sample set and items need to be validated for Ampliseq HD on chef
"""
def get_persisted_input_samples_data(request, setID=None):
    """
    Return the persisted SampleSetItems of a sample set -- plus any pending
    session items -- as JSON for the input-samples grid during sample set edit.

    :param setID: primary key of the SampleSet being edited
    :returns: HttpResponse with ``{"meta": {"total_count": N}, "objects": [...]}``
    """
    sampleset = SampleSet.objects.get(pk=setID)
    samplesetitems = list(sampleset.samples.all())
    custom_sample_column_list = list(
        SampleAttribute.objects.filter(isActive=True).order_by("id")
    )
    items_dataDict = []
    for item in samplesetitems:
        itemDict = model_to_dict(item)
        itemDict["displayedName"] = item.sample.displayedName
        itemDict["externalId"] = item.sample.externalId
        itemDict["description"] = item.sample.description
        itemDict["pending_id"] = item.id
        dnabarcode = item.dnabarcode
        if dnabarcode:
            itemDict["barcode"] = dnabarcode.id_str
            itemDict["barcodeKit"] = dnabarcode.name
        # Get custom sample attributes for the persisted item.
        # BUGFIX: accumulate all attributes into one dict; the previous code
        # rebuilt the dict on every iteration so only the last custom
        # attribute survived.
        attribute_dict = {}
        for custom_sample_attribute in custom_sample_column_list:
            attributeName = custom_sample_attribute.displayedName
            attribute = SampleAttributeValue.objects.filter(
                sample=item.sample.id, sampleAttribute=custom_sample_attribute
            )
            attribute_dict[attributeName] = attribute[0].value if attribute else None
        if custom_sample_column_list:
            itemDict["attribute_dict"] = attribute_dict
        items_dataDict.append(itemDict)

    # provided option to add new item while editing sample set
    if "input_samples" in request.session:
        # ROBUSTNESS: the session key may exist with a missing/None list
        pending_sampleSetItem_list = (
            request.session["input_samples"].get("pending_sampleSetItem_list") or []
        )
        for pending_item in pending_sampleSetItem_list:
            items_dataDict.append(pending_item)

    data = {}
    data["meta"] = {}
    data["meta"]["total_count"] = views_helper._get_pending_sampleSetItem_count(
        request
    ) + len(samplesetitems)
    data["objects"] = items_dataDict
    json_data = json.dumps(data, cls=LazyDjangoJSONEncoder)
    logger.debug("views.get_persisted_input_samples_data json_data=%s" % (json_data))
    return HttpResponse(json_data, mimetype="application/json")
def get_input_samples_data(request):
    """
    Return the pending (not yet persisted) manually-entered samples held in
    the session, as JSON for the input-samples grid.
    """
    data = {}
    data["meta"] = {}
    data["meta"]["total_count"] = views_helper._get_pending_sampleSetItem_count(request)
    # ROBUSTNESS: an empty/expired session previously raised a KeyError here;
    # fall back to an empty object list instead.
    data["objects"] = request.session.get("input_samples", {}).get(
        "pending_sampleSetItem_list", []
    )
    json_data = json.dumps(data, cls=LazyJSONEncoder)
    logger.debug("views.get_input_samples_data json_data=%s" % (json_data))
    return HttpResponse(json_data, mimetype="application/json")
@transaction.commit_manually
def save_import_samplesetitems(request):
"""
save the imported samples from file for sample set creation
"""
ERROR_MSG_SAMPLE_IMPORT_VALIDATION = ugettext_lazy(
"import_samples.messages.failure"
) # "Import Samples validation failed. The samples have not been imported. Please correct the errors and try again or choose a different sample file to import. "
    def _fail(_status, _failed=None, isError=True, mimetype="application/json"):
        # helper method to clean up and return HttpResponse with error messages
        # _status: message placed under the "status" key of the JSON body
        # _failed: optional per-row failure details, added under "failed"
        # NOTE(review): the parameter is named ``_status`` (leading
        # underscore); the non-POST branch below calls ``_fail(status=...)``,
        # which would raise a TypeError -- confirm and fix that caller.
        transaction.rollback()
        json_body = {"status": _status, "error": isError}
        if _failed:
            json_body["failed"] = _failed
        logger.info("views.save_import_samplesetitems() error=%s" % json_body)
        return HttpResponse(
            json.dumps(json_body, cls=LazyJSONEncoder), mimetype=mimetype
        )
def _success(_status, _failed=None, mimetype="application/json"):
transaction.commit()
json_body = {"status": _status, "error": False}
if _failed:
json_body["failed"] = _failed
json_body["error"] = True
return HttpResponse(
json.dumps(json_body, cls=LazyJSONEncoder), mimetype=mimetype
)
if request.method != "POST":
logger.exception(format_exc())
transaction.rollback()
return _fail(status=i18n_errors.fatal_unsupported_http_method(request.method))
postedfile = request.FILES["postedfile"]
destination = tempfile.NamedTemporaryFile(delete=False)
for chunk in postedfile.chunks():
destination.write(chunk)
postedfile.close()
destination.close()
non_ascii_files = []
# open read bytes and detect if file contains non-ascii characters
with open(destination.name, "rU") as _tmp:
if not is_ascii(_tmp.read()): # if not ascii
non_ascii_files.append(postedfile.name) # add uploaded file name
if len(non_ascii_files) > 0:
# "Only ASCII characters are supported. The following files contain non-ASCII characters: %(files)s."
_error_msg = validation.format(
ugettext("import_samples.messages.file_contains_non_ascii_characters"),
{"files": SeparatedValuesBuilder().build(non_ascii_files)},
)
os.unlink(destination.name)
return _fail(_status=_error_msg)
# check to ensure it is not empty
headerCheck = open(destination.name, "rU")
firstCSV = []
for firstRow in csv.reader(headerCheck):
firstCSV.append(firstRow)
# logger.info("views.save_import_samplesetitems() firstRow=%s;" %(firstRow))
headerCheck.close()
if not firstCSV:
os.unlink(destination.name)
return _fail(_status=validation.invalid_empty(postedfile))
index = 0
errorMsg = []
samples = []
rawSampleDataList = []
sampleSetItemList = []
failed = {}
file = open(destination.name, "rU")
csv_version_row = csv.reader(
file
).next() # skip the csv template version header and | |
*(string) --*
- *(dict) --*
Represents the amount of provisioned throughput capacity consumed on a table or an index.
- **ReadCapacityUnits** *(float) --*
The total number of read capacity units consumed on a table or an index.
- **WriteCapacityUnits** *(float) --*
The total number of write capacity units consumed on a table or an index.
- **CapacityUnits** *(float) --*
The total number of capacity units consumed on a table or an index.
:type RequestItems: dict
:param RequestItems: **[REQUIRED]**
A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per ``BatchGetItem`` request.
Each element in the map of items to retrieve consists of the following:
* ``ConsistentRead`` - If ``true`` , a strongly consistent read is used; if ``false`` (the default), an eventually consistent read is used.
* ``ExpressionAttributeNames`` - One or more substitution tokens for attribute names in the ``ProjectionExpression`` parameter. The following are some use cases for using ``ExpressionAttributeNames`` :
* To access an attribute whose name conflicts with a DynamoDB reserved word.
* To create a placeholder for repeating occurrences of an attribute name in an expression.
* To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute name. For example, consider the following attribute name:
* ``Percentile``
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see `Reserved Words <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html>`__ in the *Amazon DynamoDB Developer Guide* ). To work around this, you could specify the following for ``ExpressionAttributeNames`` :
* ``{\"#P\":\"Percentile\"}``
You could then use this substitution in an expression, as in this example:
* ``#P = :val``
.. note::
Tokens that begin with the **:** character are *expression attribute values* , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see `Accessing Item Attributes <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html>`__ in the *Amazon DynamoDB Developer Guide* .
* ``Keys`` - An array of primary key attribute values that define specific items in the table. For each primary key, you must provide *all* of the key attributes. For example, with a simple primary key, you only need to provide the partition key value. For a composite key, you must provide *both* the partition key value and the sort key value.
* ``ProjectionExpression`` - A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. For more information, see `Accessing Item Attributes <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html>`__ in the *Amazon DynamoDB Developer Guide* .
* ``AttributesToGet`` - This is a legacy parameter. Use ``ProjectionExpression`` instead. For more information, see `AttributesToGet <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html>`__ in the *Amazon DynamoDB Developer Guide* .
- *(string) --*
- *(dict) --*
Represents a set of primary keys and, for each key, the attributes to retrieve from the table.
For each primary key, you must provide *all* of the key attributes. For example, with a simple primary key, you only need to provide the partition key. For a composite primary key, you must provide *both* the partition key and the sort key.
- **Keys** *(list) --* **[REQUIRED]**
The primary key attribute values that define the items and the attributes associated with the items.
- *(dict) --*
- *(string) --*
- *(dict) --*
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see `Data Types <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes>`__ in the *Amazon DynamoDB Developer Guide* .
- **S** *(string) --*
An attribute of type String. For example:
``\"S\": \"Hello\"``
- **N** *(string) --*
An attribute of type Number. For example:
``\"N\": \"123.45\"``
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
- **B** *(bytes) --*
An attribute of type Binary. For example:
``\"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"``
- **SS** *(list) --*
An attribute of type String Set. For example:
``\"SS\": [\"Giraffe\", \"Hippo\" ,\"Zebra\"]``
- *(string) --*
- **NS** *(list) --*
An attribute of type Number Set. For example:
``\"NS\": [\"42.2\", \"-19\", \"7.5\", \"3.14\"]``
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
- *(string) --*
- **BS** *(list) --*
An attribute of type Binary Set. For example:
``\"BS\": [\"U3Vubnk=\", \"UmFpbnk=\", \"U25vd3k=\"]``
- *(bytes) --*
- **M** *(dict) --*
An attribute of type Map. For example:
``\"M\": {\"Name\": {\"S\": \"Joe\"}, \"Age\": {\"N\": \"35\"}}``
- *(string) --*
- *(dict) --*
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see `Data Types <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes>`__ in the *Amazon DynamoDB Developer Guide* .
- **L** *(list) --*
An attribute of type List. For example:
``\"L\": [ {\"S\": \"Cookies\"} , {\"S\": \"Coffee\"}, {\"N\": \"3.14159\"}]``
- *(dict) --*
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see `Data Types <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes>`__ in the *Amazon DynamoDB Developer Guide* .
- **NULL** *(boolean) --*
An attribute of type Null. For example:
``\"NULL\": true``
- **BOOL** *(boolean) --*
An attribute of type Boolean. For example:
``\"BOOL\": true``
- **AttributesToGet** *(list) --*
This is a legacy parameter. Use ``ProjectionExpression`` instead. For more information, see `Legacy Conditional Parameters <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html>`__ in the *Amazon DynamoDB Developer Guide* .
- *(string) --*
- **ConsistentRead** *(boolean) --*
The consistency of a read operation. If set to ``true`` , then a strongly consistent read is used; otherwise, an eventually consistent read is used.
- **ProjectionExpression** *(string) --*
A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the ``ProjectionExpression`` must be separated by commas.
If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.
For more information, see `Accessing Item Attributes <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html>`__ in the *Amazon DynamoDB Developer Guide* .
- **ExpressionAttributeNames** *(dict) --*
One or more substitution tokens for attribute names in an expression. The following are some use cases for using ``ExpressionAttributeNames`` :
* To access an attribute whose name conflicts with a DynamoDB reserved word.
* To create a placeholder for repeating occurrences of an attribute name in an expression.
* To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute name. For example, consider the following attribute name:
* ``Percentile``
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see `Reserved Words <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html>`__ in the *Amazon DynamoDB Developer Guide* ). To work around this, you could specify the following for ``ExpressionAttributeNames`` :
* ``{\"#P\":\"Percentile\"}``
You could then use this substitution in an expression, as in this example:
* ``#P = :val``
.. note::
Tokens that begin with the **:** character are *expression attribute values* , | |
df_dhw_fossil = df_dhw_fossil.loc[~df_dhw_fossil.sy.isna()]
dct_dmnd_dhw_fossil = (df_dhw_fossil.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
if slct_hp == 'full':
for kk, vv in dct_dmnd_ht_retr_1pc.items():
self.ml.m.dmnd[kk] = vv
for kk, vv in dct_dmnd_dhw.items():
self.ml.m.dmnd[kk] = vv
elif slct_hp == 'bau':
for kk, vv in dct_dmnd_ht_retr_1pc.items():
self.ml.m.dmnd[kk] = vv * 0.568
for kk, vv in dct_dmnd_dhw.items():
self.ml.m.dmnd[kk] = vv * 0.568
elif slct_hp == 'fossil':
for kk, vv in dct_dmnd_ht_retr_1pc_fossil.items():
self.ml.m.dmnd[kk] = vv
for kk, vv in dct_dmnd_dhw_fossil.items():
self.ml.m.dmnd[kk] = vv
elif slct_rf == 'retr_2pc':
msg = ('Setting special heating dmnd_sum to values' + str_fy.replace('_', ' ')
+ ' from column {}'.format(slct_col) + ' for retrofit scenario ' + slct_rf+ ' and for HP scenario ' + slct_hp)
logger.info(msg)
#HEAT
df_ht_retr_2pc = self.ml.m.df_profdmnd.loc[self.ml.m.df_profdmnd.dmnd_pf_id.isin(list_pf_id_dmnd_ht)].copy()
df_ht_retr_2pc['nd_id'] = df_ht_retr_2pc['dmnd_pf_id'].map(dct_pf_id_dmnd_ht)
df_ht_retr_2pc['sy'] = df_ht_retr_2pc['doy'].map(dict_tm_soy)
df_ht_retr_2pc['ca_id'] = 0
df_ht_retr_2pc.loc[df_ht_retr_2pc.dmnd_pf_id.isin(pf_ca_id_1),'ca_id'] = 1
df_ht_retr_2pc.loc[df_ht_retr_2pc.dmnd_pf_id.isin(pf_ca_id_2),'ca_id'] = 2
df_ht_retr_2pc = df_ht_retr_2pc[['nd_id', 'sy','erg_tot_retr_2pc','ca_id']].join(scaling_factor_ht, on=scaling_factor_ht.index.names)
df_ht_retr_2pc['value_new'] = df_ht_retr_2pc.erg_tot_retr_2pc * df_ht_retr_2pc.scale
df_ht_retr_2pc = df_ht_retr_2pc.loc[~df_ht_retr_2pc.sy.isna()]
dct_dmnd_ht_retr_2pc = (df_ht_retr_2pc.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
df_ht_retr_2pc_fossil = df_ht_other.copy()
df_ht_retr_2pc_fossil['value_new'] = df_ht_retr_2pc_fossil.erg_tot_retr_2pc_fossil * df_ht_retr_2pc_fossil.scale
df_ht_retr_2pc_fossil = df_ht_retr_2pc_fossil.loc[~df_ht_retr_2pc_fossil.sy.isna()]
dct_dmnd_ht_retr_2pc_fossil = (df_ht_retr_2pc_fossil.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
# DHW for AW and WW
df_dhw = IO.param_to_df(self.ml.m.dmnd).loc[IO.param_to_df(self.ml.m.dmnd).ca_id.isin([4,5])]
df_dhw = df_dhw.join(scaling_factor_dhw, on=scaling_factor_dhw.index.names)
df_dhw['value_new'] = df_dhw.value * df_dhw.scale
dct_dmnd_dhw = (df_dhw.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
df_dhw_fossil = df_dhw_other.copy()
df_dhw_fossil['value_new'] = df_dhw_fossil.erg_tot_fossil * df_dhw_fossil.scale
df_dhw_fossil = df_dhw_fossil.loc[~df_dhw_fossil.sy.isna()]
dct_dmnd_dhw_fossil = (df_dhw_fossil.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
if slct_hp == 'full':
for kk, vv in dct_dmnd_ht_retr_2pc.items():
self.ml.m.dmnd[kk] = vv
for kk, vv in dct_dmnd_dhw.items():
self.ml.m.dmnd[kk] = vv
elif slct_hp == 'bau':
for kk, vv in dct_dmnd_ht_retr_2pc.items():
self.ml.m.dmnd[kk] = vv * 0.568
for kk, vv in dct_dmnd_dhw.items():
self.ml.m.dmnd[kk] = vv * 0.568
elif slct_hp == 'fossil':
for kk, vv in dct_dmnd_ht_retr_2pc_fossil.items():
self.ml.m.dmnd[kk] = vv
for kk, vv in dct_dmnd_dhw_fossil.items():
self.ml.m.dmnd[kk] = vv
if slct_dpf == 'ee': # Normal Energy efficiency
slct_col = 'dmnd_sum' + str_fy
slct_col_prev = 'dmnd_sum' + str_fyp
dict_fy = {
0: 2015,
1: 2020,
2: 2025,
3: 2030,
4: 2035,
5: 2040,
6: 2045,
7: 2050,
}
slct_fy = self.ml.dct_step['swfy']
fy_val = dict_fy[slct_fy]
msg = ('Setting energy efficiency curves to residential for ' + str_fy.replace('_', ' ')
+ ' from column {}'.format(slct_col) + ' for ee scenario ' + slct_dpf)
logger.info(msg)
# Energy efficiency
list_nd_name_res_el = self.ml.m.df_def_node.loc[self.ml.m.df_def_node.nd_id.isin(nd_arch_el_res)].nd.to_list()
dct_nd_id_res = self.ml.m.df_def_node.loc[self.ml.m.df_def_node.nd_id.isin(nd_arch_el_res)].set_index('nd')['nd_id'].to_dict()
path_prof_id = os.path.join(config.PATH_CSV, 'def_profile.csv')
path_dmnd = os.path.join(config.PATH_CSV, 'profdmnd.csv')
df_def_profile = pd.read_csv(path_prof_id)
df_def_profile_ee = df_def_profile.copy().loc[(df_def_profile.pf.str.contains('diff'))&
(df_def_profile.primary_nd.isin(list_nd_name_res_el))&
~(df_def_profile.pf.str.contains('best'))]
df_def_profile_ee['nd_id'] = df_def_profile_ee['primary_nd'].map(dct_nd_id_res)
df_def_profile_ee_2035 = df_def_profile_ee.loc[df_def_profile_ee.pf.str.contains('2035_2015')]
df_def_profile_ee_2050 = df_def_profile_ee.loc[df_def_profile_ee.pf.str.contains('2050_2015')]
df_dmnd = pd.read_csv(path_dmnd)[['dmnd_pf_id', 'hy', 'value', 'doy']]
df_dmnd_ee_2035 = df_dmnd.copy().loc[df_dmnd.dmnd_pf_id.isin(df_def_profile_ee_2035.pf_id.to_list())].reset_index(drop=True).rename(columns={'value':'diff_erg'})
df_dmnd_ee_2035 = pd.merge(df_dmnd_ee_2035,df_def_profile_ee_2035,left_on='dmnd_pf_id', right_on='pf_id')
df_dmnd_ee_2050 = df_dmnd.copy().loc[df_dmnd.dmnd_pf_id.isin(df_def_profile_ee_2050.pf_id.to_list())].reset_index(drop=True).rename(columns={'value':'diff_erg'})
df_dmnd_ee_2050 = pd.merge(df_dmnd_ee_2050,df_def_profile_ee_2050,left_on='dmnd_pf_id', right_on='pf_id')
dict_tm_soy_hour = self.ml.m.df_tm_soy_full.loc[self.ml.m.df_tm_soy_full.tm_id==0][['sy','hy']].set_index('hy')['sy'].to_dict()
df_dmnd_ee_2035['sy'] = df_dmnd_ee_2035['hy'].map(dict_tm_soy_hour)
df_dmnd_ee_2050['sy'] = df_dmnd_ee_2050['hy'].map(dict_tm_soy_hour)
# df_dmnd_ee_2035 = df_dmnd_ee_2035.loc[~df_dmnd_ee_2035]
df_el_res = IO.param_to_df(self.ml.m.dmnd).loc[(IO.param_to_df(self.ml.m.dmnd).ca_id == 0) & (IO.param_to_df(self.ml.m.dmnd).nd_id.isin(nd_arch_el_res))]
if fy_val == 2015:
dct_dmnd_res = (df_el_res.set_index(['sy', 'nd_id', 'ca_id'])['value'].to_dict())
for kk, vv in dct_dmnd_res.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2020:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(1/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2025:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(2/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2030:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(3/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2035:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(4/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2040:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2050,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(5/7))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2045:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2050,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(6/7))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2050:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2050,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(7/7))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif slct_dpf == 'best': # BEST Energy efficiency
slct_col = 'dmnd_sum' + str_fy
slct_col_prev = 'dmnd_sum' + str_fyp
dict_fy = {
0: 2015,
1: 2020,
2: 2025,
3: 2030,
4: 2035,
5: 2040,
6: 2045,
7: 2050,
}
slct_fy = self.ml.dct_step['swfy']
fy_val = dict_fy[slct_fy]
msg = ('Setting energy efficiency curves to residential for ' + str_fy.replace('_', ' ')
+ ' from column {}'.format(slct_col) + ' for ee scenario ' + slct_dpf)
logger.info(msg)
# Energy efficiency
list_nd_name_res_el = self.ml.m.df_def_node.loc[self.ml.m.df_def_node.nd_id.isin(nd_arch_el_res)].nd.to_list()
dct_nd_id_res = self.ml.m.df_def_node.loc[self.ml.m.df_def_node.nd_id.isin(nd_arch_el_res)].set_index('nd')['nd_id'].to_dict()
path_prof_id = os.path.join(config.PATH_CSV, 'def_profile.csv')
path_dmnd = os.path.join(config.PATH_CSV, 'profdmnd.csv')
df_def_profile = pd.read_csv(path_prof_id)
df_def_profile_ee = df_def_profile.copy().loc[(df_def_profile.pf.str.contains('best_2035|2050'))&(df_def_profile.primary_nd.isin(list_nd_name_res_el))]
df_def_profile_ee['nd_id'] = df_def_profile_ee['primary_nd'].map(dct_nd_id_res)
df_def_profile_ee_2035 = df_def_profile_ee.loc[df_def_profile_ee.pf.str.contains('2035_2015')]
df_def_profile_ee_2050 = df_def_profile_ee.loc[df_def_profile_ee.pf.str.contains('2050_2015')]
df_dmnd = pd.read_csv(path_dmnd)[['dmnd_pf_id', 'hy', 'value', 'doy']]
df_dmnd_ee_2035 = df_dmnd.copy().loc[df_dmnd.dmnd_pf_id.isin(df_def_profile_ee_2035.pf_id.to_list())].reset_index(drop=True).rename(columns={'value':'diff_erg'})
df_dmnd_ee_2035 = pd.merge(df_dmnd_ee_2035,df_def_profile_ee_2035,left_on='dmnd_pf_id', right_on='pf_id')
df_dmnd_ee_2050 = df_dmnd.copy().loc[df_dmnd.dmnd_pf_id.isin(df_def_profile_ee_2050.pf_id.to_list())].reset_index(drop=True).rename(columns={'value':'diff_erg'})
df_dmnd_ee_2050 = pd.merge(df_dmnd_ee_2050,df_def_profile_ee_2050,left_on='dmnd_pf_id', right_on='pf_id')
dict_tm_soy_hour = self.ml.m.df_tm_soy_full.loc[self.ml.m.df_tm_soy_full.tm_id==0][['sy','hy']].set_index('hy')['sy'].to_dict()
df_dmnd_ee_2035['sy'] = df_dmnd_ee_2035['hy'].map(dict_tm_soy_hour)
df_dmnd_ee_2050['sy'] = df_dmnd_ee_2050['hy'].map(dict_tm_soy_hour)
df_el_res = IO.param_to_df(self.ml.m.dmnd).loc[(IO.param_to_df(self.ml.m.dmnd).ca_id == 0) & (IO.param_to_df(self.ml.m.dmnd).nd_id.isin(nd_arch_el_res))]
if fy_val == 2015:
dct_dmnd_res = (df_el_res.set_index(['sy', 'nd_id', 'ca_id'])['value'].to_dict())
for kk, vv in dct_dmnd_res.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2020:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(1/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2025:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(2/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2030:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(3/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2035:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2035,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(4/4))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2040:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2050,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(5/7))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2045:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2050,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(6/7))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
elif fy_val == 2050:
df_el_res_ee = pd.merge(df_el_res,df_dmnd_ee_2050,on=['sy','nd_id']).assign(
value_new = lambda x: x.value + x.diff_erg*(7/7))
dct_dmnd_res_ee = (df_el_res_ee.set_index(['sy', 'nd_id', 'ca_id'])['value_new'].to_dict())
for kk, vv in dct_dmnd_res_ee.items():
self.ml.m.dmnd[kk] = vv
#######################################################################
def set_co2_price(str_fy=None):
    """Update the price_co2 parameter from the selected year column.

    Parameters
    ----------
    str_fy : str or None
        Year suffix of the source column (e.g. ``'_2030'``); ``None`` or
        the empty string selects the base column ``price_co2``.
    """
    if str_fy is None:  # compare singletons with ``is``, not ``==`` (PEP 8)
        str_fy = ''
    slct_col = 'price_co2' + str_fy
    msg = ('Setting price_co2 to values' + str_fy.replace('_', ' ')
           + ' from column {}'.format(slct_col))
    logger.info(msg)
    # Node-level CO2 prices for the selected year column.
    df_new = self.ml.m.df_def_node[['nd_id', slct_col]]
    par = self.ml.m.dict_par['price_co2']
    # Base year uses the plain monthly factor column; future years use
    # the "others" variant.
    col_mt_fact = 'mt_fact' if not str_fy else 'mt_fact_others'
    par.init_update(df_new, col_mt_fact)
#######################################################################
def set_erg_inp(slct_ch, str_fy=None, excl_fl=None):
    """Set the exogenous energy production parameter ``erg_inp``.

    Parameters
    ----------
    slct_ch : str
        Scenario name; ``'default'`` keeps the base table, any other value
        overlays matching entries from the scenario table.
    str_fy : str or None
        Year suffix of the source column (e.g. ``'_2030'``); ``None`` or
        the empty string selects the base column ``erg_inp``.
    excl_fl : list of int, optional
        Fuel ids to exclude from the update (default: none excluded).
    """
    if str_fy is None:  # compare singletons with ``is``, not ``==`` (PEP 8)
        str_fy = ''
    if excl_fl is None:  # avoid the mutable-default-argument pitfall
        excl_fl = []
    slct_col = 'erg_inp' + str_fy
    msg = ('Setting erg_inp to values ' + str_fy.replace('_', '')
           + 'from column {}.'.format(slct_col))
    logger.info(msg)
    ind_col = ['nd_id', 'ca_id', 'fl_id']
    # ``~`` is the pandas boolean inversion operator; the original unary
    # ``-`` on a boolean Series is deprecated and raises in modern pandas.
    mask_excl = ~self.ml.m.df_fuel_node_encar.fl_id.isin(excl_fl)
    dct_erg_inp = (self.ml.m.df_fuel_node_encar.loc[mask_excl]
                   .set_index(ind_col)[slct_col].to_dict())
    if slct_ch != 'default':
        # Scenario values override the defaults for matching indices.
        df = self.ml.m.df_fuel_node_encar_scenarios.copy()
        df = df.loc[df.scenario == slct_ch]
        dct_erg_inp_scen = (df.set_index(ind_col)[slct_col].to_dict())
        dct_erg_inp.update(dct_erg_inp_scen)
    # Push the assembled values into the model parameter.
    for kk, vv in dct_erg_inp.items():
        self.ml.m.erg_inp[kk] = vv
#######################################################################
def set_erg_chp(str_fy=None, excl_pp=[]):
    # NOTE(review): ``excl_pp=[]`` is a mutable default argument -- safe only
    # as long as it is never mutated; consider ``excl_pp=None`` + sentinel.
    ''' Select exogenous chp energy production for selected year. '''
    # NOTE(review): ``== None`` should be ``is None`` (PEP 8).
    if str_fy == None:
        str_fy = ''
    # Column holding the CHP energy values for the selected year.
    slct_col = 'erg_chp' + str_fy
    msg = ('Setting erg_chp to values ' + str_fy.replace('_', '')
           + ' from column {}.'.format(slct_col))
    logger.info(msg)
    # Build {(pp_id, ca_id): erg_chp} for CHP plants that are not excluded.
    # NOTE(review): unary ``-`` on a boolean mask is deprecated pandas;
    # ``~`` is almost certainly intended here -- confirm before upgrading.
    dct_erg_chp = (self.ml.m.df_plant_encar
                   .loc[-self.ml.m.df_plant_encar.pp_id.isin(excl_pp)
                        & self.ml.m.df_plant_encar.pp_id.isin(self.ml.m.chp)]
                   .set_index(['pp_id', 'ca_id'])[slct_col]
                   .to_dict())
    # NOTE(review): the visible code only builds the dict; the loop applying
    # it to the model parameter appears to be beyond this chunk -- confirm.
    #######################################################################
partial dependence for all the points.
yi = [_calc(x) for x in xi_transformed]
return xi, yi
def partial_dependence_2D(space, model, i, j, samples,
                          n_points=40):
    """
    Calculate the partial dependence for two dimensions in the search-space.

    The surrogate ``model`` is evaluated on a regular grid over dimensions
    ``i`` and ``j`` while the influence of every other dimension is averaged
    out over the supplied ``samples``.

    Parameters
    ----------
    space : `Space`
        The parameter space over which the minimization was performed.
    model
        Surrogate model for the objective function.
    i : int
        The first dimension for which to calculate the partial dependence.
    j : int
        The second dimension for which to calculate the partial dependence.
    samples : np.array, shape=(n_points, n_dims)
        Randomly sampled and transformed points used for averaging.
    n_points : int, default=40
        Number of points at which to evaluate the partial dependence
        along each dimension `i` and `j`.

    Returns
    -------
    xi : np.array, shape=n_points
        Evaluation points along dimension ``j``.
    yi : np.array, shape=n_points
        Evaluation points along dimension ``i``.
    zi : np.array, shape=(n_points, n_points)
        The average value of the objective function at each point `(xi, yi)`.
    """
    # Start column of every search-space dimension in the transformed
    # (model) space. Categorical dimensions are one-hot encoded, so one
    # search-space dimension may span several transformed columns.
    offsets = np.cumsum([0] + [dim.transformed_size
                               for dim in space.dimensions])

    def _avg_prediction(val_j, val_i):
        """Mean model prediction with dims j/i pinned to the given values."""
        pinned = np.array(samples)  # fresh copy; ``samples`` stays untouched
        pinned[:, offsets[j]:offsets[j + 1]] = val_j
        pinned[:, offsets[i]:offsets[i + 1]] = val_i
        return np.mean(model.predict(pinned))

    # Evenly spaced values (original and transformed) for both dimensions.
    xi, xi_transformed = _evenly_sample(space.dimensions[j], n_points)
    yi, yi_transformed = _evenly_sample(space.dimensions[i], n_points)
    # Evaluate every (x, y) grid combination; rows follow yi, columns xi.
    zi = np.array([[_avg_prediction(x, y) for x in xi_transformed]
                   for y in yi_transformed])
    return xi, yi, zi
def plot_objective_2D(result, dimension_identifier1, dimension_identifier2,
                      n_points=40, n_samples=250, levels=10, zscale='linear',
                      sample_source='random',
                      minimum='result', n_minimum_search=None, ax=None):
    """
    Create and return a Matplotlib axes with a landscape contour-plot of
    the last fitted model of the search-space, overlaid with all the
    samples from the optimization results, for the two given dimensions
    of the search-space.

    This is similar to `plot_objective()` but only for 2 dimensions
    whose doc-string also has a more extensive explanation.

    Parameters
    ----------
    result : `OptimizeResult`
        The optimization results e.g. from calling `gp_minimize()`.
    dimension_identifier1 : str or int
        Name or index of a dimension in the search-space.
    dimension_identifier2 : str or int
        Name or index of a dimension in the search-space.
    n_points : int, default=40
        Number of points along each dimension where the partial dependence
        is evaluated when generating the contour-plots.
    n_samples : int, default=250
        Number of random samples used for estimating the contour-plot
        of the objective function.
    levels : int, default=10
        Number of levels to draw on the contour plot.
    zscale : str, default='linear'
        Scale to use for the z axis of the contour plots.
        Either 'log' or linear for all other choices.
    sample_source : str, default='random'
        Where the averaging samples come from: 'random' draws fresh points
        from the space; any other value is forwarded to
        `_evaluate_min_params` and the single resulting point is used.
    minimum : str, default='result'
        Strategy forwarded to `_evaluate_min_params` for locating the
        minimum that is highlighted on the plot.
    n_minimum_search : int, default=None
        Forwarded to `_evaluate_min_params` for search strategies that
        need a number of candidate points.
    ax : `Matplotlib.Axes`, default: None
        When set, everything is plotted inside this axis.

    Returns
    -------
    ax : `Matplotlib.Axes`
        The Matplotlib Axes-object holding the plot. You can save the
        enclosing figure e.g. via `ax.figure.savefig('file.png')`.
    """
    # Get the search-space instance from the optimization results.
    space = result.space
    # Locate the minimum to highlight on the plot.
    x_vals = _evaluate_min_params(result, minimum, n_minimum_search)
    if sample_source == "random":
        x_eval = None
        samples = space.transform(space.rvs(n_samples=n_samples))
    else:
        # Average around a single evaluation point instead of random samples.
        x_eval = _evaluate_min_params(result, sample_source,
                                      n_minimum_search)
        samples = space.transform([x_eval])
    # Map categorical values to integers so they can be plotted.
    x_samples, x_minimum, _ = _map_categories(space, result.x_iters, x_vals)
    # Get the dimension-object, its index in the search-space, and its name.
    index1, dimension1 = space[dimension_identifier1]
    index2, dimension2 = space[dimension_identifier2]
    # Get the samples from the optimization-log for the relevant dimensions.
    samples1 = x_samples[:, index1]
    samples2 = x_samples[:, index2]
    # Get the best-found samples for the relevant dimensions.
    best_sample1 = x_minimum[index1]
    best_sample2 = x_minimum[index2]
    # Get the last fitted model for the search-space.
    last_model = result.models[-1]
    # Estimate the objective function for these sampled points
    # using the last fitted model for the search-space.
    xi, yi, zi = partial_dependence_2D(space, last_model, index2, index1,
                                       samples, n_points=n_points)
    if ax is None:
        ax = plt.gca()
    # Scale for the z-axis of the contour-plot. Either Log or Linear (None).
    locator = LogLocator() if zscale == 'log' else None
    # Plot the contour-landscape for the objective function.
    ax.contourf(xi, yi, zi, levels, locator=locator, cmap='viridis_r')
    # Plot all the parameters that were sampled during optimization.
    # These are plotted as small black dots.
    ax.scatter(samples1, samples2, c='black', s=10, linewidths=1)
    # Plot the best parameters that were sampled during optimization.
    # These are plotted as a big red star.
    ax.scatter(best_sample1, best_sample2,
               c='red', s=50, linewidths=1, marker='*')
    # Use the dimension-names as the labels for the plot-axes.
    ax.set_xlabel(dimension1.name)
    ax.set_ylabel(dimension2.name)
    ax.autoscale(enable=True, axis='x', tight=True)
    ax.autoscale(enable=True, axis='y', tight=True)
    # Use log-scale on the x-axis?
    if dimension1.prior == 'log-uniform':
        ax.set_xscale('log')
    # Use log-scale on the y-axis?
    if dimension2.prior == 'log-uniform':
        ax.set_yscale('log')
    return ax
def plot_histogram(result, dimension_identifier, bins=20, rotate_labels=0,
ax=None):
"""
Create and return a Matplotlib figure with a histogram
of the samples from the optimization results,
for a given dimension of the search-space.
Parameters
----------
result : `OptimizeResult`
The optimization results e.g. from calling `gp_minimize()`.
dimension_identifier : str or int
Name or index of a dimension in the search-space.
bins : int, bins=20
Number of bins in the histogram.
rotate_labels : int, rotate_labels=0
Degree to rotate category-names on the x-axis.
Only used for Categorical dimensions.
Returns
-------
ax : `Matplotlib.Axes`
The Matplotlib Axes-object.
"""
# Get the search-space instance from the optimization results.
space = result.space
# Get the dimension-object.
index, dimension = space[dimension_identifier]
# Get the samples from the optimization-log for that particular dimension.
samples = [x[index] for x in result.x_iters]
if ax is None:
ax = plt.gca()
if isinstance(dimension, Categorical):
# When the search-space dimension is Categorical, it means
# that the possible values are strings. Matplotlib's histogram
# does not support this, so we have to make a bar-plot instead.
# NOTE: This only shows the categories that are in the samples.
# So if a category was not sampled, it will not be shown here.
# Count the number of occurrences of the string-categories.
counter = Counter(samples)
# The counter returns a dict where the keys are the category-names
# and the values are the number of occurrences for each category.
names = list(counter.keys())
counts = list(counter.values())
# Although Matplotlib's docs indicate that the bar() function
# can take a list of strings for the x-axis, it doesn't appear to work.
# So we hack it by creating a list of integers and setting the
# tick-labels with the category-names instead.
x = np.arange(len(counts))
# Plot using bars.
ax.bar(x, counts, tick_label=names)
# Adjust the rotation of the category-names on the x-axis.
ax.set_xticklabels(labels=names, rotation=rotate_labels)
else:
# Otherwise the search-space Dimension is either integer or float,
# in which case the histogram can be plotted more easily.
if dimension.prior == 'log-uniform':
# Map the number of bins to a log-space for the dimension bounds.
bins_mapped = np.logspace(*np.log10(dimension.bounds), bins)
        # ------------------------------------------------------------------
# Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# python std lib
import re
import copy
from collections import defaultdict
# third party libs
from lxml import etree as ETREE
import xml.etree.ElementTree as ET
from netaddr import IPAddress
from netaddr.core import AddrFormatError
from pyIOSXR import IOSXR
from pyIOSXR.exceptions import ConnectError
from pyIOSXR.exceptions import TimeoutError
from pyIOSXR.exceptions import InvalidInputError
# napalm_base
from napalm_base.helpers import convert, find_txt, mac, ip
from napalm_base.base import NetworkDriver
from napalm_base.utils import string_parsers
from napalm_base.exceptions import ConnectionException, MergeConfigException, ReplaceConfigException,\
CommandErrorException, CommandTimeoutException
class IOSXRDriver(NetworkDriver):
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """Prepare an IOS-XR driver instance; no connection is opened yet."""
    opts = optional_args if optional_args is not None else {}
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    # Change-tracking flags used by the load/compare/commit workflow.
    self.pending_changes = False
    self.replace = False
    self.port = opts.get('port', 22)
    # By default the configuration is locked for the whole session.
    self.lock_on_connect = opts.get('config_lock', True)
    self.device = IOSXR(hostname, username, password, timeout=timeout,
                        port=self.port, lock=self.lock_on_connect)
def open(self):
    """Open the session to the device.

    Raises napalm's ConnectionException when the underlying pyIOSXR
    connection attempt fails.
    """
    try:
        self.device.open()
    except ConnectError as conn_err:
        # Normalise the pyIOSXR error into the napalm exception hierarchy.
        # (Python 2 style: exceptions expose a ``.message`` attribute.)
        raise ConnectionException(conn_err.message)
def close(self):
    """Close the session to the device."""
    self.device.close()
def load_replace_candidate(self, filename=None, config=None):
    """Stage a candidate configuration that fully replaces the running one.

    Raises ReplaceConfigException when the candidate is rejected.
    """
    self.replace = True
    self.pending_changes = True
    if not self.lock_on_connect:
        # Lock now if it was not already taken at connect time.
        self.device.lock()
    try:
        self.device.load_candidate_config(filename=filename, config=config)
    except InvalidInputError as err:
        # Roll back our bookkeeping before surfacing the failure.
        self.replace = False
        self.pending_changes = False
        raise ReplaceConfigException(err.message)
def load_merge_candidate(self, filename=None, config=None):
    """Stage a candidate configuration to be merged into the running one.

    Raises MergeConfigException when the candidate is rejected.
    """
    self.replace = False
    self.pending_changes = True
    if not self.lock_on_connect:
        # Lock now if it was not already taken at connect time.
        self.device.lock()
    try:
        self.device.load_candidate_config(filename=filename, config=config)
    except InvalidInputError as err:
        # Roll back our bookkeeping before surfacing the failure.
        self.pending_changes = False
        self.replace = False
        raise MergeConfigException(err.message)
def compare_config(self):
if not self.pending_changes:
return ''
elif self.replace:
return self.device.compare_replace_config().strip()
else:
return self.device.compare_config().strip()
def commit_config(self):
    """Commit the staged candidate (replace or merge) and clear tracking."""
    # Pick the commit routine matching how the candidate was staged.
    commit = (self.device.commit_replace_config if self.replace
              else self.device.commit_config)
    commit()
    self.pending_changes = False
    if not self.lock_on_connect:
        # Release the lock taken when the candidate was loaded.
        self.device.unlock()
def discard_config(self):
    """Drop the staged candidate configuration and clear change tracking."""
    self.device.discard_config()
    self.pending_changes = False
    if not self.lock_on_connect:
        # Release the lock taken when the candidate was loaded.
        self.device.unlock()
def rollback(self):
    """Revert the device to the configuration active before the last commit."""
    self.device.rollback()
# perhaps both should be moved in napalm_base.helpers at some point
@staticmethod
def _find_txt(xml_tree, path, default = ''):
try:
return xml_tree.find(path).text.strip()
except Exception:
return default
@staticmethod
def _convert(to, who, default = u''):
if who is None:
return default
try:
return to(who)
except:
return default
def get_facts(self):
    """Return general device facts.

    Keys: vendor, os_version, hostname, uptime, serial_number, fqdn,
    model, interface_list. Fields the device does not report keep their
    defaults (empty string / -1).
    """
    facts = {
        'vendor': u'Cisco',
        'os_version': u'',
        'hostname': u'',
        'uptime': -1,
        'serial_number': u'',
        'fqdn': u'',
        'model': u'',
        'interface_list': []
    }
    # A single RPC fetches both system time (hostname/uptime) and the
    # platform inventory (serial, software revision, model).
    facts_rpc_request = (
        '<Get>'
        '<Operational>'
        '<SystemTime/>'
        '<PlatformInventory/>'
        '</Operational>'
        '</Get>'
    )
    facts_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(facts_rpc_request))
    system_time_xpath = './/SystemTime/Uptime'
    platform_attr_xpath = './/RackTable/Rack/Attributes/BasicInfo'
    system_time_tree = facts_rpc_reply.xpath(system_time_xpath)[0]
    platform_attr_tree = facts_rpc_reply.xpath(platform_attr_xpath)[0]
    # NOTE: Python 2 only -- ``unicode`` is used throughout this driver.
    hostname = convert(unicode, find_txt(system_time_tree, 'Hostname'))
    uptime = convert(int, find_txt(system_time_tree, 'Uptime'), -1)
    serial = convert(unicode, find_txt(platform_attr_tree, 'SerialNumber'))
    os_version = convert(unicode, find_txt(platform_attr_tree, 'SoftwareRevision'))
    model = convert(unicode, find_txt(platform_attr_tree, 'ModelName'))
    interface_list = self.get_interfaces().keys()
    facts.update({
        'os_version': os_version,
        'hostname': hostname,
        'model': model,
        'uptime': uptime,
        'serial_number': serial,
        # No FQDN source in this RPC; the hostname is reused.
        'fqdn': hostname,
        'interface_list': interface_list
    })
    return facts
def get_interfaces(self):
    """Return a dict of interface details keyed by interface name.

    Each entry carries is_up, is_enabled, mac_address, description, speed
    (kbps, derived from the reported Bandwidth) and last_flapped
    (always -1.0 here; not populated by this RPC).
    """
    interfaces = {}
    # Defaults used for fields the device does not report.
    INTERFACE_DEFAULTS = {
        'is_enabled': False,
        'is_up': False,
        'mac_address': u'',
        'description': u'',
        'speed': -1,
        'last_flapped': -1.0
    }
    interfaces_rpc_request = '<Get><Operational><Interfaces/></Operational></Get>'
    interfaces_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(interfaces_rpc_request))
    for interface_tree in interfaces_rpc_reply.xpath('.//Interfaces/InterfaceTable/Interface'):
        interface_name = find_txt(interface_tree, 'Naming/InterfaceName')
        if not interface_name:
            continue
        is_up = (find_txt(interface_tree, 'LineState') == 'IM_STATE_UP')
        # NOTE(review): is_enabled is derived from LineState exactly like
        # is_up; it probably should come from the admin state instead --
        # confirm against the IOS-XR operational schema.
        is_enabled = (find_txt(interface_tree, 'LineState') == 'IM_STATE_UP')
        mac_address = mac(find_txt(interface_tree, 'MACAddress/Address'))
        # Bandwidth is reported in bps-like units; scaled down by 1e-3 here.
        speed = int(convert(int, find_txt(interface_tree, 'Bandwidth'), 0) * 1e-3)
        description = find_txt(interface_tree, 'Description')
        interfaces[interface_name] = copy.deepcopy(INTERFACE_DEFAULTS)
        interfaces[interface_name].update({
            'is_up': is_up,
            'speed': speed,
            'is_enabled': is_enabled,
            'mac_address': mac_address,
            'description': description
        })
    return interfaces
def get_interfaces_counters(self):
    """Return per-interface traffic counters keyed by interface handle.

    Interfaces without an ``InterfaceStatistics`` block are skipped.
    All counters are integers taken from FullInterfaceStats.
    """
    rpc_command = "<Get><Operational><Interfaces><InterfaceTable></InterfaceTable></Interfaces></Operational></Get>"
    result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
    # Counter name -> element name under FullInterfaceStats. Replaces
    # twelve copy-pasted extraction statements in the original.
    counter_elements = {
        'tx_multicast_packets': 'MulticastPacketsSent',
        'tx_discards': 'OutputDrops',
        'tx_octets': 'BytesSent',
        'tx_errors': 'OutputErrors',
        'rx_octets': 'BytesReceived',
        'tx_unicast_packets': 'PacketsSent',
        'rx_errors': 'InputErrors',
        'tx_broadcast_packets': 'BroadcastPacketsSent',
        'rx_multicast_packets': 'MulticastPacketsReceived',
        'rx_broadcast_packets': 'BroadcastPacketsReceived',
        'rx_discards': 'InputDrops',
        'rx_unicast_packets': 'PacketsReceived',
    }
    interface_counters = dict()
    for interface in result_tree.iter('Interface'):
        interface_name = interface.find('InterfaceHandle').text
        if interface.find('InterfaceStatistics') is None:
            # No statistics reported for this interface.
            continue
        # (The original initialised ``interface_stats`` twice and used a
        # dead ``else`` after ``continue``; both removed.)
        interface_stats = dict()
        for counter, element in counter_elements.items():
            interface_stats[counter] = int(interface.find(
                'InterfaceStatistics/FullInterfaceStats/' + element).text)
        interface_counters[interface_name] = interface_stats
    return interface_counters
def get_bgp_neighbors(self):
    """Return BGP neighbor details, keyed by VRF name then neighbor IP.

    The special key 'global' covers the default VRF. Each neighbor dict
    carries local_as, remote_as, remote_id, description, is_enabled,
    is_up, uptime and per-address-family prefix counters (-1 when the
    device does not report them).
    """
    def generate_vrf_query(vrf_name):
        """Helper to provide XML-query for the VRF-type we're interested in."""
        if vrf_name == "global":
            rpc_command = """<Get>
                <Operational>
                    <BGP>
                        <InstanceTable>
                            <Instance>
                                <Naming>
                                    <InstanceName>
                                        default
                                    </InstanceName>
                                </Naming>
                                <InstanceActive>
                                    <DefaultVRF>
                                        <GlobalProcessInfo>
                                        </GlobalProcessInfo>
                                        <NeighborTable>
                                        </NeighborTable>
                                    </DefaultVRF>
                                </InstanceActive>
                            </Instance>
                        </InstanceTable>
                    </BGP>
                </Operational>
                </Get>"""
        else:
            rpc_command = """<Get>
                <Operational>
                    <BGP>
                        <InstanceTable>
                            <Instance>
                                <Naming>
                                    <InstanceName>
                                        default
                                    </InstanceName>
                                </Naming>
                                <InstanceActive>
                                    <VRFTable>
                                        <VRF>
                                            <Naming>
                                                %s
                                            </Naming>
                                            <GlobalProcessInfo>
                                            </GlobalProcessInfo>
                                            <NeighborTable>
                                            </NeighborTable>
                                        </VRF>
                                    </VRFTable>
                                </InstanceActive>
                            </Instance>
                        </InstanceTable>
                    </BGP>
                </Operational>
                </Get>""" % vrf_name
        return rpc_command
    # Initial run to figure out what VRFs are available. We read the
    # Configured section because bulk-getting all instance data to do the
    # same could get ridiculously heavy. The DefaultVRF is always included.
    active_vrfs = ["global"]
    rpc_command = """<Get>
            <Operational>
                <BGP>
                    <ConfigInstanceTable>
                        <ConfigInstance>
                            <Naming>
                                <InstanceName>
                                    default
                                </InstanceName>
                            </Naming>
                            <ConfigInstanceVRFTable>
                            </ConfigInstanceVRFTable>
                        </ConfigInstance>
                    </ConfigInstanceTable>
                </BGP>
            </Operational>
            </Get>"""
    result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
    for node in result_tree.iter('ConfigVRF'):
        active_vrfs.append(str(node.find('Naming/VRFName').text))
    result = dict()
    for vrf in active_vrfs:
        rpc_command = generate_vrf_query(vrf)
        result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
        this_vrf = dict()
        this_vrf['peers'] = dict()
        if vrf == "global":
            this_vrf['router_id'] = unicode(result_tree.find(
                'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/DefaultVRF/GlobalProcessInfo/VRF/RouterID').text)
        else:
            this_vrf['router_id'] = unicode(result_tree.find(
                'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/VRFTable/VRF/GlobalProcessInfo/VRF/RouterID').text)
        neighbors = dict()
        for neighbor in result_tree.iter('Neighbor'):
            this_neighbor = dict()
            this_neighbor['local_as'] = int(neighbor.find('LocalAS').text)
            this_neighbor['remote_as'] = int(neighbor.find('RemoteAS').text)
            this_neighbor['remote_id'] = unicode(neighbor.find('RouterID').text)
            try:
                this_neighbor['description'] = unicode(neighbor.find('Description').text)
            except AttributeError:
                # No Description element configured for this neighbor.
                this_neighbor['description'] = u''
            # BUG FIX: the original computed is_enabled three times using
            # ``is "1"`` (object identity, not equality). One equality test.
            this_neighbor['is_enabled'] = (
                str(neighbor.find('ConnectionAdminStatus').text) == "1")
            if str(neighbor.find('ConnectionState').text) == "BGP_ST_ESTAB":
                this_neighbor['is_up'] = True
                this_neighbor['uptime'] = int(neighbor.find('ConnectionEstablishedTime').text)
            else:
                this_neighbor['is_up'] = False
                this_neighbor['uptime'] = -1
            this_neighbor['address_family'] = dict()
            afi_text = neighbor.find('ConnectionRemoteAddress/AFI').text
            if afi_text == "IPv4":
                this_afi = "ipv4"
            elif afi_text == "IPv6":
                this_afi = "ipv6"
            else:
                this_afi = afi_text
            this_neighbor['address_family'][this_afi] = dict()
            try:
                this_neighbor['address_family'][this_afi][
                    "received_prefixes"] = int(neighbor.find('AFData/Entry/PrefixesAccepted').text) + int(
                    neighbor.find('AFData/Entry/PrefixesDenied').text)
                this_neighbor['address_family'][this_afi][
                    "accepted_prefixes"] = int(neighbor.find('AFData/Entry/PrefixesAccepted').text)
                this_neighbor['address_family'][this_afi][
                    "sent_prefixes"] = int(neighbor.find('AFData/Entry/PrefixesAdvertised').text)
            except AttributeError:
                # AFData missing: counters unavailable for this neighbor.
                this_neighbor['address_family'][this_afi]["received_prefixes"] = -1
                this_neighbor['address_family'][this_afi]["accepted_prefixes"] = -1
                this_neighbor['address_family'][this_afi]["sent_prefixes"] = -1
            try:
                neighbor_ip = unicode(neighbor.find('Naming/NeighborAddress/IPV4Address').text)
            except AttributeError:
                neighbor_ip = unicode(neighbor.find('Naming/NeighborAddress/IPV6Address').text)
            neighbors[neighbor_ip] = this_neighbor
        this_vrf['peers'] = neighbors
        result[vrf] = this_vrf
    return result
def get_environment(self):
def get_module_xml_query(module,selection):
return """<Get>
<AdminOperational>
<EnvironmentalMonitoring>
<RackTable>
<Rack>
<Naming>
<rack>0</rack>
</Naming>
<SlotTable>
<Slot>
<Naming>
<slot>%s</slot>
</Naming>
%s
</Slot>
</SlotTable>
</Rack>
</RackTable>
</EnvironmentalMonitoring>
</AdminOperational>
</Get>""" % (module,selection)
environment_status = dict()
environment_status['fans'] = dict()
environment_status['temperature'] = dict()
environment_status['power'] = dict()
environment_status['cpu'] = dict()
environment_status['memory'] = int()
# finding slots with equipment we're interested in
rpc_command = """<Get>
<AdminOperational>
<PlatformInventory>
<RackTable>
<Rack>
<Naming>
<Name>0</Name>
</Naming>
<SlotTable>
</SlotTable>
</Rack>
</RackTable>
</PlatformInventory>
</AdminOperational>
</Get>"""
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
active_modules = defaultdict(list)
for slot in result_tree.iter("Slot"):
for card in slot.iter("CardTable"):
#find enabled slots, figoure out type and save for later
if card.find('Card/Attributes/FRUInfo/ModuleAdministrativeState').text == "ADMIN_UP":
slot_name = slot.find('Naming/Name').text
module_type = re.sub("\d+", "", slot_name)
if len(module_type) > 0:
active_modules[module_type].append(slot_name)
else:
active_modules["LC"].append(slot_name)
#
# PSU's
#
for psu in active_modules['PM']:
if psu in ["PM6", "PM7"]: # Cisco bug, chassis difference V01<->V02
continue
rpc_command = get_module_xml_query(psu,'')
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
psu_status = dict()
psu_status['status'] = False
psu_status['capacity'] = float()
psu_status['output'] = float()
for sensor in result_tree.iter('SensorName'):
if sensor.find('Naming/Name').text == "host__VOLT":
this_psu_voltage = float(sensor.find('ValueBrief').text)
elif sensor.find('Naming/Name').text == "host__CURR":
this_psu_current = float(sensor.find('ValueBrief').text)
elif sensor.find('Naming/Name').text == "host__PM":
this_psu_capacity = float(sensor.find('ValueBrief').text)
if this_psu_capacity > 0:
psu_status['capacity'] = this_psu_capacity
psu_status['status'] = True
if this_psu_current and this_psu_voltage:
psu_status['output'] = (this_psu_voltage * this_psu_current) / 1000000.0
environment_status['power'][psu] = psu_status
#
# Memory
#
rpc_command = "<Get><AdminOperational><MemorySummary></MemorySummary></AdminOperational></Get>"
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for node in result_tree.iter('Node'):
print
if node.find('Naming/NodeName/Slot').text == active_modules['RSP'][0]: # first enabled RSP
available_ram = int(node.find('Summary/SystemRAMMemory').text)
free_ram = int(node.find('Summary/FreeApplicationMemory').text)
break # we're only looking at one of the RSP's
if available_ram and free_ram:
used_ram = available_ram - free_ram
memory = dict()
memory['available_ram'] = available_ram
memory['used_ram'] = used_ram
environment_status['memory'] = memory
#
# Fans
#
for fan in active_modules['FT']:
rpc_command = get_module_xml_query(fan,'')
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for module in result_tree.iter('Module'):
for sensortype in module.iter('SensorType'):
for sensorname in sensortype.iter('SensorNameTable'):
if sensorname.find('SensorName/Naming/Name').text == "host__FanSpeed_0":
environment_status['fans'][fan] = {'status': int(sensorname.find(
'SensorName/ValueDetailed/Status').text) is 1}
#
# CPU
#
cpu = dict()
rpc_command = "<Get><Operational><SystemMonitoring></SystemMonitoring></Operational></Get>"
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for module in result_tree.iter('CPUUtilization'):
this_cpu = dict()
this_cpu["%usage"] = float(module.find('TotalCPUFiveMinute').text)
rack = module.find('Naming/NodeName/Rack').text
slot = module.find('Naming/NodeName/Slot').text
instance = module.find('Naming/NodeName/Instance').text
position = "%s/%s/%s" % (rack,slot,instance)
cpu[position] = this_cpu
environment_status["cpu"] = cpu
#
# Temperature
#
temperature = | |
# Repository: MauroCL75/Gerald
"""
Introduction
============
Capture, document and manage database schemas.
This is the Schema module, it contains one useful class, Schema. This is a
super class which is then sub-classed for specific databases (eg.
OracleSchema, MySQLSchema, etc).
A schema is comprised of collections of tables, views, stored code objects,
triggers, sequences and other assorted 'objects'
This module is licensed under the BSD License (see LICENSE.txt)
This module requires Python 2.3 (and above) and a valid DB-API module
To do
=====
- change get_ddl method(s) to put different objects in different files like
Oracle Designer (one each for tables, constraints, views, code objects)
- Table and View dump methods rely on calc_precision function, we really
should do away with this
- One possible solution is to make Column a class and just implement a dump
method for that
Possible Future development
===========================
- Support the Jakarta Torque schema DTD
(http://db.apache.org/torque/schema-reference.html)
- Or possibly the Dewdrop DTD
(http://dewdrop.sourceforge.net/xmlns/database.xsd)
- Change the compare method for the Schema class. Perhaps using the difflib
library module?
- Change the to_xml methods to use ElementTree elements rather than strings
"""
__author__ = "<NAME> <<EMAIL>>"
__date__ = (2010, 4, 8)
__version__ = (0, 4, 1)
from decimal import Decimal
import os
import sys
from gerald.utilities.dburi import get_connection
from gerald.utilities.Log import get_log
# Pick a writable directory for the log file: prefer $TMP, then $HOME, and
# fall back to the current working directory.  The original code left
# LOG_DIRECTORY undefined (NameError on the next line) when neither
# environment variable was set.
if 'TMP' in os.environ:
    LOG_DIRECTORY = os.environ['TMP']
elif 'HOME' in os.environ:
    LOG_DIRECTORY = os.environ['HOME']
else:
    LOG_DIRECTORY = os.getcwd()
LOG_FILENAME = os.path.join(LOG_DIRECTORY, 'gerald.log')
LOG = get_log('gerald', LOG_FILENAME, 'INFO')
# LOG = get_log('gerald', LOG_FILENAME, 'INFO')
# LOG = get_log('gerald', LOG_FILENAME, 'DEBUG')
class Schema(object):
    """
    A representation of a database schema.
    A schema is a collection of objects within a database. It is a logical
    grouping, physical implementation is independent of the schema.
    This is an abstract class which shouldn't be used directly. It is designed
    to be sub-classed in database specific modules.
    These sub-classes will need to implement the _get_schema and __init__ methods
    A schema will have the following attributes
      - name. This will be the same as the connectionString
      - api_version. To indicate when we change the API
      - schema. A collection of objects which form this schema.
    Private attributes
      - _db. A database connection. Optional, need not be provided by sub-classes.
      - _cursor. A cursor generated from _db
    """
    def __init__(self, schema_name, connection_string=None, omit_error_objects=False):
        """
        Initialise the schema.
        @param schema_name: A name for this schema
        @type schema_name: String
        @param connection_string: If this is provided then we populate the
        schema's attributes from the database it connects us to.
        @type connection_string: String
        @param omit_error_objects: If True, objects whose details can't be
        read are logged and skipped instead of raising (see L{_set_unless_fail})
        @type omit_error_objects: Boolean
        """
        self.name = schema_name
        self.api_version = Decimal('1.1')
        self.omit_error_objects = omit_error_objects
        self.schema = {}
        if connection_string:
            # Connect to the db and suck out the data dictionary information
            self.connect(connection_string)
            self.schema = self._get_schema(self._cursor)
    def connect(self, connection_string):
        "Connect to a database and set _db and _cursor attributes"
        LOG.debug('Connecting to %s' % self.name)
        self._db = get_connection(connection_string)
        self._cursor = self._db.cursor()
        LOG.debug('Established connection to %s' % self.name)
    def _get_schema(self, cursor):
        "Place holder method to be implemented by child classes"
        raise NotImplementedError
    def dump(self, file_name=None, sort=None):
        """
        Output this schema in a nice easy to read format to <file_name>. If a
        <file_name> isn't provided then we return the stream.
        We rely on each object to output its own details.
        @param file_name: The name of a file to dump the output to
        @type file_name: String
        @param sort: If this is set the schema objects will be sorted by name
        @type sort: Boolean
        @return: Schema contents or, if file_name is specified, nothing
        @rtype: String
        """
        results = ["Schema: %s\n" % self.name]
        objects = list(self.schema.keys())
        if sort:
            objects.sort()
        for schema_object in objects:
            results.append(self.schema[schema_object].dump())
        if file_name:
            # Context manager guarantees the handle is closed even if the
            # write fails (the original leaked it); the file is also only
            # created once we actually have content to write.
            with open(file_name, 'w') as dump_file:
                dump_file.write('\n'.join(results))
        else:
            return '\n'.join(results)
    def to_xml(self, file_name=None):
        """
        Output this schema in XML format to <file_name>. If a <file_name> isn't
        provided then we return the stream.
        We rely on each object to produce its own XML fragment which are then
        combined here.
        @param file_name: The name of a file to dump the XML to
        @type file_name: String
        @return: Schema XML or, if file_name is specified, nothing
        @rtype: String
        """
        results = ['<schema name="%s">' % self.name]
        for schema_object in list(self.schema.keys()):
            results.append(self.schema[schema_object].to_xml())
        results.append('</schema>')
        if file_name:
            # with-statement closes the file even on a failed write
            with open(file_name, 'w') as xml_file:
                xml_file.write('\n'.join(results))
        else:
            return '\n'.join(results)
    def get_ddl(self, file_name=None):
        """
        Output the DDL to create this schema to <file_name>. If a <file_name>
        isn't provided then we return the stream.
        We rely on each schema object to produce its own DDL statements which are
        then combined here.
        @param file_name: The name of a file to dump the DDL to
        @type file_name: String
        @return: Schema DDL or, if file_name is specified, nothing
        @rtype: String
        """
        results = []
        for schema_object in list(self.schema.keys()):
            results.append(self.schema[schema_object].get_ddl())
        if file_name:
            # with-statement closes the file even on a failed write
            with open(file_name, 'w') as ddl_file:
                ddl_file.write('\n'.join(results))
        else:
            return '\n'.join(results)
    def __cmp__(self, other_schema):
        """
        Compare this schema with <other_schema>
        NOTE: __cmp__ is only consulted by Python 2; under Python 3 this
        method is never called implicitly.
        @param other_schema: A schema to be compared to self
        @type other_schema: An object of a class inherited from schema.Schema
        @return: 0 if the two schemas are the same, otherwise we return 1
        @rtype: Boolean, well Integer really
        """
        # __cmp__ functions return -1 if we are less than schema
        #                           0 if we are the same as schema
        #                           1 if we are greater than schema
        # If our 'compare' method returns anything there are differences
        if self.compare(other_schema):
            return 1
        else:
            return 0
    def compare(self, other_schema):
        """
        Calculate the differences between the current schema and <other_schema>.
        @param other_schema: A schema to be compared to self
        @type other_schema: An object of a class inherited from schema.Schema
        @return: The differences between the two schemas
        @rtype: String
        """
        # I left a note here about difflib, but can't find it. Oh dear.
        results = []
        if not isinstance(other_schema, Schema):
            results.append('We are not comparing two schemas')
        else:
            if list(self.schema.keys()) != list(other_schema.schema.keys()):
                results.append('The schemas have different objects')
            for schema_object in list(self.schema.keys()):
                if schema_object in list(other_schema.schema.keys()):
                    if self.schema[schema_object].__class__ != other_schema.schema[schema_object].__class__:
                        results.append('%s is of different types' % schema_object)
                        results.append('in the two schemas')
                    if self.schema[schema_object] != other_schema.schema[schema_object]:
                        results.append('%s is different ' % schema_object)
                        results.append('in the two schemas')
                else:
                    results.append('%s is missing ' % schema_object)
                    results.append('in the one schema')
        return ' '.join(results)
    def _set_unless_fail(self, schema, key, object_name, object_type, *args):
        """Try and create object_name in schema taking appropriate action on failure
        This method is particularly useful if someone (say, Oracle) has broken
        their own rules and created system objects that trip up Gerald. By
        setting omit_error_objects to True when creating our schema we can not
        blow up when finding something that doesn't quite fulfil our
        expectations.
        Because we are maintaining compatibility with Python 2.5 we can't access
        the exception instance so the log message won't necessarily be that
        useful.
        """
        if (not object_name.isupper()) and ('"' not in object_name):
            # then this must be a chaotic evil case-sensitive object name
            object_name = '"%s"' % object_name
        try:
            schema[key] = object_type(object_name, *args)
        except AttributeError:
            if self.omit_error_objects:
                LOG.warning("Couldn't get details for %s" % (key, ))
            else:
                raise
class Table(object):
"""
A representation of a database table.
A table is made up of columns and will have indexes, triggers, constraints,
primary and foreign keys.
It may also have comments - although this is currently only available in Oracle
This is an abstract class which shouldn't be used. It is designed to be
sub-classed in database specific modules.
The sub-classes will need to implement the L{_get_table} and L{get_ddl}
methods
They will also need a class method called calc_precision, whose signature
will depend on the module
A table will have the following attributes
- name
- columns. A dictionary (keyed on column name) of column dictionaries.
These column dictionaries must have the following keys:
- sequence. The order of this column in the table. Integer
- name. Column name. Text
- type. Native data type, will vary by database. Text
- | |
target
assert output.min_vals == lower_bound.min()
assert output.max_vals == upper_bound.min()
def test_min_args(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    highest: int,
) -> None:
    """Exercise the axis, keepdims and initial keyword arguments of min()."""
    sept = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    # axis: reduce along axis 1, bounds reduced the same way
    result = sept.min(axis=1)
    expected = reference_data.min(axis=1)
    assert (result.child == expected).all()
    assert (result.min_vals == lower_bound.min(axis=1)).all()
    assert (result.max_vals == upper_bound.min(axis=1)).all()
    # keepdims: the reduced dimension is retained with size 1
    result = sept.min(keepdims=True)
    expected = reference_data.min(keepdims=True)
    assert (result.child == expected).all()
    assert (result.min_vals == lower_bound.min(keepdims=True)).all()
    assert (result.max_vals == upper_bound.min(keepdims=True)).all()
    # initial: an extra candidate value participates in the reduction
    result = sept.min(initial=-highest)
    expected = reference_data.min(initial=-highest)
    assert (result.child == expected).all()
    assert (result.min_vals == lower_bound.min(initial=-highest)).all()
    assert (result.max_vals == upper_bound.min(initial=-highest)).all()
def test_max(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """Test max"""  # docstring corrected: it previously said "Test min" (copy-paste)
    tensor = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    output = tensor.max()
    target = reference_data.max()
    assert output.child == target
    # the max of each bound array bounds the max of the data
    assert output.min_vals == lower_bound.max()
    assert output.max_vals == upper_bound.max()
def test_max_args(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    highest: int,
) -> None:
    """Exercise the axis, keepdims and initial keyword arguments of max()."""
    sept = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    # axis: reduce along axis 1, bounds reduced the same way
    result = sept.max(axis=1)
    expected = reference_data.max(axis=1)
    assert (result.child == expected).all()
    assert (result.min_vals == lower_bound.max(axis=1)).all()
    assert (result.max_vals == upper_bound.max(axis=1)).all()
    # keepdims: the reduced dimension is retained with size 1
    result = sept.max(keepdims=True)
    expected = reference_data.max(keepdims=True)
    assert (result.child == expected).all()
    assert (result.min_vals == lower_bound.max(keepdims=True)).all()
    assert (result.max_vals == upper_bound.max(keepdims=True)).all()
    # initial: an extra candidate value participates in the reduction
    result = sept.max(initial=-highest)
    expected = reference_data.max(initial=-highest)
    assert (result.child == expected).all()
    assert (result.min_vals == lower_bound.max(initial=-highest)).all()
    assert (result.max_vals == upper_bound.max(initial=-highest)).all()
@pytest.mark.skip(reason="Not supporting for this release")
def test_mod_array(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    traskmaster: Entity,
    reference_scalar_manager: VirtualMachinePrivateScalarManager,
) -> None:
    """Modulo of a SEPT by a plain ndarray propagates data and bounds."""
    sept = SEPT(
        child=reference_data,
        max_vals=upper_bound,
        min_vals=lower_bound,
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    # mod by all-ones: everything reduces to zero
    divisor = np.ones_like(reference_data)
    result = sept % divisor
    assert isinstance(result, SEPT)
    assert result.shape == sept.shape
    assert (result.child == np.zeros_like(reference_data)).all()
    assert (result.max_vals == np.zeros_like(upper_bound)).all()
    assert (result.min_vals == np.zeros_like(lower_bound)).all()
    # mod by a constant 4: data and bounds are all taken mod 4
    divisor = np.ones_like(reference_data) * 4
    result = sept % divisor
    assert isinstance(result, SEPT)
    assert result.shape == sept.shape
    assert (result.child == reference_data % 4).all()
    assert (result.max_vals == upper_bound % 4).all()
    assert (result.min_vals == lower_bound % 4).all()
@pytest.mark.skip(reason="Not supporting for this release")
def test_mod_sept(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    traskmaster: Entity,
    reference_scalar_manager: VirtualMachinePrivateScalarManager,
) -> None:
    """Test mod with public SEPT"""
    reference_tensor = SEPT(
        child=reference_data,
        max_vals=upper_bound,
        min_vals=lower_bound,
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    # divisor of all ones (bounds 1..5): every value mod 1 is zero
    other = SEPT(
        child=np.ones_like(reference_data),
        max_vals=np.ones_like(reference_data) * 5,
        min_vals=np.ones_like(reference_data),
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    output = reference_tensor % other
    assert isinstance(output, SEPT)
    assert output.shape == reference_tensor.shape
    assert (output.child == np.zeros_like(reference_data)).all()
    assert (output.max_vals == np.zeros_like(upper_bound) % 5).all()
    assert (
        output.min_vals == np.zeros_like(lower_bound)
    ).all()  # Beware of division by 0 error
    # divisor of constant 6 (bounds 1..6): plain elementwise mod 6
    other = SEPT(
        child=np.ones_like(reference_data) * 6,
        max_vals=np.ones_like(reference_data) * 6,
        min_vals=np.ones_like(reference_data),
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    output = reference_tensor % other
    assert isinstance(output, SEPT)
    assert output.shape == reference_tensor.shape
    assert (output.child == reference_data % 6).all()
    assert (output.max_vals == upper_bound % 6).all()
    assert (
        output.min_vals == np.zeros_like(lower_bound)
    ).all()  # Beware of division by 0 error
@pytest.mark.skip(reason="Not supporting for this release")
def test_divmod_array(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    traskmaster: Entity,
    reference_scalar_manager: VirtualMachinePrivateScalarManager,
) -> None:
    """divmod by a plain ndarray yields (quotient, remainder) SEPT pair."""
    reference_tensor = SEPT(
        child=reference_data,
        max_vals=upper_bound,
        min_vals=lower_bound,
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    # constant divisor 4: compare against numpy's // and % elementwise
    other = np.ones_like(reference_data) * 4
    quotient, remainder = reference_tensor.__divmod__(other)
    assert isinstance(quotient, SEPT)
    assert isinstance(remainder, SEPT)
    assert quotient.shape == reference_tensor.shape
    assert remainder.shape == reference_tensor.shape
    assert (quotient.child == reference_data // 4).all()
    assert (remainder.child == reference_data % 4).all()
    assert (quotient.max_vals == upper_bound // 4).all()
    assert (remainder.max_vals == upper_bound % 4).all()
    assert (quotient.min_vals == lower_bound // 4).all()
    assert (remainder.min_vals == lower_bound % 4).all()
@pytest.mark.skip(reason="Not supporting for this release")
def test_divmod_sept(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    traskmaster: Entity,
    reference_scalar_manager: VirtualMachinePrivateScalarManager,
) -> None:
    """divmod by another SEPT of the same entity yields SEPT pair."""
    reference_tensor = SEPT(
        child=reference_data,
        max_vals=upper_bound,
        min_vals=lower_bound,
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    # divisor of all ones (bounds 1..5): quotient is the data, remainder zero
    other = SEPT(
        child=np.ones_like(reference_data),
        max_vals=np.ones_like(reference_data) * 5,
        min_vals=np.ones_like(reference_data),
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    quotient, remainder = reference_tensor.__divmod__(other)
    assert isinstance(quotient, SEPT)
    assert isinstance(remainder, SEPT)
    assert quotient.shape == reference_tensor.shape
    assert remainder.shape == reference_tensor.shape
    assert (quotient.child == reference_data).all()
    assert (remainder.child == np.zeros_like(reference_data)).all()
    assert (quotient.max_vals == upper_bound // 5).all()
    assert (remainder.max_vals == np.zeros_like(upper_bound)).all()
    assert (quotient.min_vals == lower_bound).all()  # Beware of division by 0 error
    assert (
        remainder.min_vals == np.zeros_like(lower_bound)
    ).all()  # Beware of division by 0 error
    # divisor of constant 6 (bounds 1..6): plain // 6 and % 6 elementwise
    other = SEPT(
        child=np.ones_like(reference_data) * 6,
        max_vals=np.ones_like(reference_data) * 6,
        min_vals=np.ones_like(reference_data),
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    quotient, remainder = reference_tensor.__divmod__(other)
    assert isinstance(quotient, SEPT)
    assert isinstance(remainder, SEPT)
    assert quotient.shape == reference_tensor.shape
    assert remainder.shape == reference_tensor.shape
    assert (quotient.child == reference_data // 6).all()
    assert (remainder.child == reference_data % 6).all()
    assert (quotient.max_vals == upper_bound // 6).all()
    assert (remainder.max_vals == upper_bound % 6).all()
    assert (quotient.min_vals == lower_bound).all()  # Beware of division by 0 error
    assert (
        remainder.min_vals == np.zeros_like(lower_bound)
    ).all()  # Beware of division by 0 error
def test_matmul_array(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    traskmaster: Entity,
    reference_scalar_manager: VirtualMachinePrivateScalarManager,
) -> None:
    """Matrix multiplication of a SEPT by a plain ndarray matches numpy."""
    sept = SEPT(
        child=reference_data,
        max_vals=upper_bound,
        min_vals=lower_bound,
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    rhs = np.ones_like(reference_data.T) * 5
    product = sept.__matmul__(rhs)
    # result shape is (rows of lhs) x (cols of rhs)
    assert product.shape[0] == reference_data.shape[0]
    assert product.shape[1] == rhs.shape[1]
    assert (product.child == reference_data.__matmul__(rhs)).all()
def test_matmul_sept(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
    traskmaster: Entity,
    reference_scalar_manager: VirtualMachinePrivateScalarManager,
) -> None:
    """Matrix multiplication of two SEPTs owned by the same entity."""
    sept = SEPT(
        child=reference_data,
        max_vals=upper_bound,
        min_vals=lower_bound,
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    rhs_data = np.ones_like(reference_data.T) * 5
    rhs = SEPT(
        child=rhs_data,
        max_vals=np.ones_like(rhs_data) * 10,
        min_vals=np.ones_like(rhs_data),
        entity=ishan,
        scalar_manager=reference_scalar_manager,
    )
    product = sept.__matmul__(rhs)
    # result shape is (rows of lhs) x (cols of rhs)
    assert product.shape[0] == reference_data.shape[0]
    assert product.shape[1] == rhs.shape[1]
    assert (product.child == reference_data.__matmul__(rhs.child)).all()
def test_trace(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """Test whether the trace() method works"""
    # BUG FIX: the signature was missing the `ishan` fixture parameter even
    # though the body uses it, which raised NameError when the test ran.
    tensor = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    output = tensor.trace()
    target = reference_data.trace()
    assert (output.child == target).all()
    # trace of the bound arrays bounds the trace of the data
    assert output.min_vals == lower_bound.trace()
    assert output.max_vals == upper_bound.trace()
def test_prod(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """Test whether the prod() method works"""
    # BUG FIX: the signature was missing the `ishan` fixture parameter even
    # though the body uses it, which raised NameError when the test ran.
    tensor = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    output = tensor.prod()
    target = reference_data.prod()
    assert (output.child == target).all()
    # product of the bound arrays bounds the product of the data
    assert output.min_vals == lower_bound.prod()
    assert output.max_vals == upper_bound.prod()
def test_round(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """round(decimals=0) should round the data and both bound arrays."""
    reference_tensor = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    output = reference_tensor.round(decimals=0)
    # NOTE(review): astype(np.int32) truncates rather than rounds; this only
    # equals round() when the fixture data is already integral — confirm
    # against the reference_data fixture.
    target = reference_data.astype(dtype=np.int32)
    assert (output.child == target).all()
    assert (output.min_vals == lower_bound.astype(dtype=np.int32)).all()
    assert (output.max_vals == upper_bound.astype(dtype=np.int32)).all()
def test_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """A tensor owned by a single entity reports exactly one entity."""
    sept = SEPT(
        child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
    )
    assert isinstance(sept, SEPT)
    assert sept.n_entities == 1
# End of Ishan's tests
@pytest.fixture
def ent() -> Entity:
    # A throwaway entity used by the comparison tests below.
    return Entity(name="test")
@pytest.fixture
def ent2() -> Entity:
    # A second, distinct entity for the cross-entity comparison tests.
    return Entity(name="test2")
@pytest.fixture
def reference_sept(entity_name, low, high) -> SEPT:
    """This is used to generate a Single Entity Phi Tensor with random values in [low, high)"""
    # NOTE(review): `entity_name`, `low`, `high` and `dims` are assumed to be
    # fixtures/globals defined elsewhere in this module or in conftest — confirm.
    child = np.random.randint(low=low, high=high, size=(dims, dims))
    # high - 1 because randint's upper bound is exclusive
    max_vals = np.full((dims, dims), high - 1, dtype=np.int32)
    min_vals = np.full((dims, dims), low, dtype=np.int32)
    entity = Entity(name=entity_name)
    return SEPT(child=child, entity=entity, max_vals=max_vals, min_vals=min_vals)
def test_le_same_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ent: Entity,
) -> None:
    """__le__ between tensors of the same entity compares elementwise."""
    base = SEPT(
        child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    # identical data, same entity
    same_values = SEPT(
        child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    # strictly larger data, same entity
    larger_values = SEPT(
        child=reference_data + 1, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    assert base.__le__(same_values).child.all()
    assert base.__le__(larger_values).child.all()
    assert base.__le__(reference_data).child.all()
def test_le_diff_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ent: Entity,
    ent2: Entity,
) -> None:
    """__le__ across different entities should produce an IGT result."""
    mine = SEPT(
        child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    # identical data but owned by a different entity
    theirs = SEPT(
        child=reference_data, entity=ent2, max_vals=upper_bound, min_vals=lower_bound
    )
    comparison = mine <= theirs
    assert isinstance(comparison, IGT)
    assert comparison._values().all()
    assert (comparison._max_values() == np.ones_like(comparison._max_values())).all()
    assert (comparison._min_values() == np.zeros_like(comparison._min_values())).all()
def test_ge_same_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ent: Entity,
) -> None:
    """__ge__ between tensors of the same entity compares elementwise."""
    base = SEPT(
        child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    # identical data, same entity
    same_values = SEPT(
        child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    # strictly larger data, same entity
    larger_values = SEPT(
        child=reference_data + 1, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    assert base.__ge__(same_values).child.all()
    assert larger_values.__ge__(base).child.all()
    assert base.__ge__(reference_data).child.all()
def test_ge_diff_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ent: Entity,
    ent2: Entity,
) -> None:
    """__ge__ across different entities should produce an IGT result."""
    tensor1 = SEPT(
        child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
    )
    # same data, different entity
    tensor2 = SEPT(
        child=reference_data, entity=ent2, max_vals=upper_bound, min_vals=lower_bound
    )
    # BUG FIX: this test previously used `<=` (copied from the __le__ test);
    # a __ge__ test must exercise the >= operator.
    result = tensor1 >= tensor2
    assert isinstance(result, IGT)
    assert result._values().all()
    assert (result._max_values() == np.ones_like(result._max_values())).all()
    assert (result._min_values() == np.zeros_like(result._min_values())).all()
def test_lt_same_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ent: Entity,
) -> None:
tensor1 = SEPT(
child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
)
# same data, same entity
tensor2 = SEPT(
child=reference_data, entity=ent, max_vals=upper_bound, min_vals=lower_bound
)
# different data, same | |
# gh_stars: 1-10
#!/usr/bin/python
# Copyright 2013 MS Open Tech
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#check_azure_storage.py: Azure storage monitor script
"""Contains the nagios azure storage plugin code."""
import argparse
import azure
from azure.storage.cloudstorageaccount import CloudStorageAccount
import logging
import os
from azuremonitor.publishsettings import PublishSettings
from azure.servicemanagement import ServiceManagementService
import sys
from datetime import datetime
from datetime import timedelta
import exceptions
# Module-level logger; configured later by setup_logger().
logger = None # pylint: disable-msg=C0103
# Map from CLI key (-k/--key) to the storage-analytics metric it reads:
#   help           -- argparse help text
#   measure        -- attribute name on the metrics table row (see property_value)
#   nagios_message -- %s-style template for the nagios status line
#   unit           -- unit suffix appended to the reported value
COUNTERS = {
    'ingress' :   { 'help'      : 'Get Total Ingress',
                    'measure'  : 'TotalIngress',
                    'nagios_message'  : 'Total incoming traffic %s',
                    'unit'      : 'MB',
                    #'direction' : 'NA'
                    },
    'egress' :   { 'help'      : 'Get Total Engress',
                    'measure'  : 'TotalEgress',
                    'nagios_message'  : 'Total outgoing traffic %s',
                    'unit'      : 'MB',
                    #'direction' : 'NA'
                    },
    'requests' :   { 'help'      : 'Get total requests',
                    'measure'  : 'TotalRequests',
                    'nagios_message'  : 'Total number of requests %s',
                    'unit'      : '',
                    #'direction' : 'NA'
                    },
    'billablerequests': { 'help'      : 'Get Total billable requests',
                    'measure'  : 'TotalBillableRequests',
                    'nagios_message'  :
                    'Total number of billable requests %s',
                    'unit'      : '',
                    #'direction' : 'NA'
                    },
    'availability': { 'help'      : 'Get availability',
                    'measure'  : 'Availability',
                    'nagios_message'  : 'Availability %s',
                    'unit'      : '%',
                    #'direction' : 'NA',
                    },
    'percentsuccess': { 'help'      : 'Get percent success',
                    'measure'  : 'PercentSuccess',
                    'nagios_message'  :
                    'Successful requests out of total = %s',
                    'unit'      : '%',
                    #'direction' : 'NA',
                    },
    'e2elatency': { 'help'      : 'Get E2E latency',
                    'measure'  : 'AverageE2ELatency',
                    'nagios_message'  : 'End to end latency %s',
                    'unit'      : 'ms',
                    #'direction' : 'NA'
                    },
    'srvlatency': { 'help'      : 'Get Avg server latency',
                    'measure'  : 'AverageServerLatency',
                    'nagios_message'  : 'Server latency %s',
                    'unit'      : 'ms',
                    #'direction' : 'NA'
                    },
    'throttlingerr': { 'help'      : 'Get percent throttling error',
                    'measure'  : 'PercentThrottlingError',
                    'nagios_message'  : 'Throttling error %s',
                    'unit'      : '%',
                    #'direction' : 'NA'
                    },
    'timeouterr': { 'help'      : 'Get percent timeout error',
                    'measure'  : 'PercentTimeoutError',
                    'nagios_message'  : 'Timeout error %s',
                    'unit'      : '%',
                    #'direction' : 'NA'
                    },
    'srverror': { 'help'      : 'Get percent server other error',
                    'measure'  : 'PercentServerOtherError',
                    'nagios_message'  : 'Other server error %s',
                    'unit'      : '%',
                    #'direction' : 'NA'
                    },
    'clienterror': { 'help'      : 'Get percent client other error',
                    'measure'  : 'PercentClientOtherError',
                    'nagios_message'  : 'Client error %s',
                    'unit'      : '%',
                    #'direction' : 'NA'
                    },
    'anonclienterror': { 'help'      : 'Get anon client other error',
                    'measure'  : 'AnonymousClientOtherError',
                    'nagios_message'  : 'Anonymous client error %s',
                    'unit'      : '%',
                    #'direction' : 'NA'
                    },
}
def property_value(row, prop):
    """Return the metric attribute named *prop* from *row*.

    The previous implementation built a dict of all thirteen metric
    attributes up front just to return one of them, which both did
    unnecessary work and raised AttributeError if *any* attribute was
    missing from the row.  getattr() reads only the requested property.

    Note: an unknown *prop* now raises AttributeError instead of KeyError;
    all callers pass COUNTERS[...]['measure'] values, which are valid.
    """
    return getattr(row, prop)
def handle_args():
    """Build the command-line parser, parse sys.argv, and return the result."""
    parser = argparse.ArgumentParser(description='Check Azure Storage',
                                     epilog='(c) MS Open Tech')
    parser.add_argument('storageact',
                        help='Storage account name to check')
    parser.add_argument(
        '-p', '--publish-settings',
        required=True,
        help='.publishsettings file to authenticate with azure',
        dest='psfile')
    if os.name == 'nt':
        # Certificate-based authentication is only required on Windows.
        parser.add_argument(
            '-f', '--certname',
            required=False,
            help='cert authentication with azure. needed on Windows',
            dest='cert')
    # Exactly one storage service must be selected.
    service_group = parser.add_mutually_exclusive_group(required=True)
    for flag, const, text in (('--table', 'table', "Check table service"),
                              ('--blob', 'blob', "Check blob service"),
                              ('--queue', 'queue', "Check queue service")):
        service_group.add_argument(flag, action='store_const',
                                   help=text, const=const, dest='type')
    # Exactly one metric family must be selected.
    metric_group = parser.add_mutually_exclusive_group(required=True)
    metric_group.add_argument('--tx', action='store_const', const='tx',
                              dest='subtype', help="Check transaction metrics")
    metric_group.add_argument('--cap', action='store_const',
                              const='cap', dest='subtype',
                              help="Check capacity metrics. Applies only to -blob")
    parser.add_argument('-a', '--all', action='store_true',
                        help='Check all storage accounts, ignores storageact')
    parser.add_argument('-w', '--warning', required=False, dest='warning',
                        help='Specify warning threshold')
    parser.add_argument('-c', '--critical', required=False, dest='critical',
                        help='Specify critical threshold')
    parser.add_argument('-k', '--key', required=False, dest='key',
                        help='Status/Counter to check')
    parser.add_argument('-v', '--verbose', action='count',
                        default=0, help='verbosity')
    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
    return parser.parse_args()
def eval_counter_for_nagios(row, counter, warning, critical, verbosity):
    """Evaluate one storage metric against the nagios warning/critical ranges.

    row -- metrics table row holding the counter values
    counter -- entry from the COUNTERS dict describing the metric
    warning -- Nagios warning range
    critical -- Nagios critical range
    verbosity -- nagios verbosity level
    Returns the (code, message) pair produced by nagios_eval.
    """
    measure = counter['measure']
    return nagios_eval(
        property_value(row, measure),
        warning,
        critical,
        counter['nagios_message'],
        counter['unit'],
        verbosity,
    )
def is_within_range(nagstring, value):
    """Decide whether *value* violates the nagios range string *nagstring*.

    nagstring -- nagios threshold/range specification string
    value -- numeric value to test
    Returns True when the value is outside the acceptable range (i.e. an
    alert should fire), False otherwise.
    Raises Exception for an unparseable range string.
    """
    if not nagstring:
        return False
    import re
    first = r'(?P<first>(-?[0-9]+(\.[0-9]+)?))'
    second = r'(?P<second>(-?[0-9]+(\.[0-9]+)?))'
    # "10"   -> alert when value > 10 or value < 0
    match = re.match(r'^%s$' % first, nagstring)
    if match:
        return (value > float(match.group('first'))) or (value < 0)
    # "10:"  -> alert when value < 10
    match = re.match(r'^%s:$' % first, nagstring)
    if match:
        return value < float(match.group('first'))
    # "~:10" -> alert when value > 10
    match = re.match(r'^~:%s$' % first, nagstring)
    if match:
        return value > float(match.group('first'))
    # "5:10" -> alert when value is outside [5, 10]
    match = re.match(r'^%s:%s$' % (first, second), nagstring)
    if match:
        return (value < float(match.group('first'))) or \
               (value > float(match.group('second')))
    # "@5:10" -> alert when value is inside [5, 10]
    match = re.match(r'^@%s:%s$' % (first, second), nagstring)
    if match:
        return not ((value < float(match.group('first'))) or
                    (value > float(match.group('second'))))
    raise Exception('Improper warning/critical parameter format.')
def nagios_eval(result, warning, critical, nagios_message, unit='',
                verbosity = 0):
    """Evaluate result against the warning/critical ranges and build the
    nagios status message.
    result -- counter value
    warning -- nagios warning range string
    critical -- nagios critical range string
    nagios_message -- %s-style message template for the counter
    unit -- unit for the perf counter value
    verbosity -- nagios verbosity value (0 = prefix only, 1 = short, 2+ = full)
    Returns (nagios code, message): 0/OK, 1/WARNING, 2/CRITICAL
    """
    # critical takes precedence over warning
    if is_within_range(critical, result):
        prefix = 'CRITICAL:'
        code = 2
    elif is_within_range(warning, result):
        prefix = 'WARNING:'
        code = 1
    else:
        prefix = 'OK:'
        code = 0
    strresult = str(result)
    if verbosity == 0:
        # quietest mode: only the status prefix, and only when alerting
        if code > 0:
            nagios_message = '%s' % prefix
        else:
            nagios_message = ''
    elif verbosity == 1:
        if code > 0:
            nagios_message = nagios_message % (strresult)
            # NOTE(review): prefix already ends with ':' so this produces
            # e.g. 'CRITICAL::message=...' — confirm the double colon is intended.
            nagios_message = '%s:%s=%s %s' % ( prefix, nagios_message,
                strresult, unit or '')
        else:
            nagios_message = ''
    else:
        # verbose: always emit message plus the configured thresholds
        nagios_message = nagios_message % (strresult)
        nagios_message = '%s%s%s,warning=%s,critical=%s,'\
            % ( prefix, nagios_message, unit or '', warning or '', critical or '')
    return code, nagios_message
def setup_logger(verbose):
    """Bind the module-level logger to the root logger, with a level
    derived from the verbosity count (-vvv or more enables DEBUG)."""
    global logger
    logger = logging.getLogger()
    level = logging.DEBUG if verbose >= 3 else logging.WARNING
    logger.setLevel(level)
    logger.addHandler(logging.StreamHandler())
def retrieve_keys(management, storageact_name):
    """Retrieve the primary and secondary keys of a storage account.

    management -- azure service management client
    storageact_name -- storage account name
    Returns (primary, secondary), or (None, None) if the keys can't be read.
    """
    try:
        storage_keys = management.get_storage_account_keys(storageact_name)
        primary_key = storage_keys.storage_service_keys.primary
        secondary_key = storage_keys.storage_service_keys.secondary
        return primary_key, secondary_key
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; any service/attribute error means "no keys".
        return None, None
def check_storagecap_errors(table_service):
    """Report the two most recent blob storage capacity metric rows.

    Capacity metrics are only supported for blob storage, so the
    ``$MetricsCapacityBlob`` analytics table is queried directly.

    table_service -- Azure table service where metrics are stored

    Returns a (code, message) tuple: (0, details) on success, or
    (3, reason) — Nagios UNKNOWN — when data is missing or an error occurs.
    """
    # Restrict the query to rows whose PartitionKey (a timestamp) is within
    # roughly the last day.
    latest_utcime = datetime.utcnow()
    latestday = (latest_utcime - timedelta(days=1)).strftime('%Y%m%dT0000')
    recentday_partitionkey = 'PartitionKey ge \'%s\'' % latestday
    table_name = '$MetricsCapacityBlob'
    try:
        rows = table_service.query_entities(table_name=table_name,
                                            filter=recentday_partitionkey)
        if len(rows) > 1:
            # Format the two newest rows (rows are returned oldest-first).
            row = rows[len(rows)-1]
            msg_one = '{0}:{{Capacity:{1}, ContainerCount:{2}, '\
                'ObjectCount:{3}}}'.format(row.RowKey, row.Capacity,
                                           row.ContainerCount, row.ObjectCount)
            row = rows[len(rows)-2]
            msg_two = '{0}:{{Capacity:{1}, ContainerCount:{2},'\
                ' ObjectCount:{3}}}'.format(row.RowKey,
                                            row.Capacity,
                                            row.ContainerCount,
                                            row.ObjectCount)
            return 0, '{0},{1}'.format(msg_one, msg_two)
        else:
            return 3, 'Capacity data not found'
    except azure.WindowsAzureMissingResourceError:
        # Analytics table does not exist yet (metrics never enabled).
        return 3, 'Capacity table not found'
    except:
        # NOTE(review): deliberately broad best-effort catch so the Nagios
        # plugin returns UNKNOWN instead of crashing; consider narrowing.
        return 3, 'Internal error'
def check_storagetx_errors(table_service, storage_type, key, warning,
critical, verbosity):
"""Check storage transaction errors
table_service -- table service where metric are stored
type -- blob/queue/table
key - needed only for transaction metric
warning - Nagios warning level
critical - Nagios critical level
"""
errors = []
try:
latest_utcime = datetime.utcnow()
latest_hour = (latest_utcime-timedelta(hours=2)).strftime('%Y%m%dT%H00')
recenthour_partitionkey = 'PartitionKey ge \'%s\'' % latest_hour
storage_type = storage_type.lower()
if storage_type == 'blob':
table_name = '$MetricsTransactionsBlob'
elif storage_type == 'table':
table_name = '$MetricsTransactionsTable'
else:
table_name = '$MetricsTransactionsQueue'
rows = table_service.query_entities(table_name = table_name,
filter = recenthour_partitionkey)
if len(rows) > 0:
row = rows[len(rows)-1]
else:
return 3, 'Performance data not available'
current_counters = {}
if key == 'all':
# for inspecting all keys, we can't use critical or warning levels
current_counters = COUNTERS
warning = None
critical = | |
# file: php-fpm/scripts/fpm-stats.py
#!/usr/bin/env python
import select # @UnresolvedImport
import struct
import socket
import errno
import types
import sys
__all__ = ['FCGIApp']
# Constants from the FastCGI 1.0 specification.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
# Record types.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
# Flag bit in FCGI_BeginRequestBody: keep the connection open after the request.
FCGI_KEEP_CONN = 1
# Application roles.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
# protocolStatus values carried in FCGI_EndRequestBody.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
# Variable names for FCGI_GET_VALUES management records.
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
# struct format strings for the fixed-size protocol bodies (network byte order).
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_BeginRequestBody_LEN = struct.calcsize(FCGI_BeginRequestBody)
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
    import time
    # Set non-zero to write debug output to a file.
    DEBUG = 0
    DEBUGLOG = '/tmp/fcgi_app.log'
def _debug(level, msg):
    """Append a timestamped line to DEBUGLOG when DEBUG is at least *level*.

    Failures are swallowed on purpose: debug logging must never break
    request processing.
    """
    # pylint: disable=W0702
    if level > DEBUG:
        return
    try:
        logfile = open(DEBUGLOG, 'a')
        try:
            logfile.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
        finally:
            logfile.close()
    except:
        pass
def decode_pair(s, pos=0):
    """
    Decode one FastCGI name/value pair starting at *pos* in *s*.

    Per the FastCGI spec, each length is encoded in a single byte when it
    is below 128, or in four bytes with the high bit set otherwise.

    Returns (new_pos, (name, value)) where new_pos is the offset of the
    first byte after the pair.
    """
    lengths = []
    for _ in (0, 1):
        length = ord(s[pos])
        if length & 128:
            # Four-byte length: mask off the high marker bit.
            length = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
            pos += 4
        else:
            pos += 1
        lengths.append(length)
    nameLength, valueLength = lengths
    name = s[pos:pos + nameLength]
    pos += nameLength
    value = s[pos:pos + valueLength]
    pos += valueLength
    return (pos, (name, value))
def encode_pair(name, value):
    """
    Encode a FastCGI name/value pair.

    Lengths below 128 are emitted as one byte; longer lengths as four
    bytes with the high bit (0x80000000) set, per the FastCGI spec.
    The encoded string is returned.
    """
    nameLength = len(name)
    if nameLength < 128:
        s = chr(nameLength)
    else:
        # Python 2 long literal; four-byte length with marker bit set.
        s = struct.pack('!L', nameLength | 0x80000000L)
    valueLength = len(value)
    if valueLength < 128:
        s += chr(valueLength)
    else:
        s += struct.pack('!L', valueLength | 0x80000000L)
    return s + name + value
class Record(object):
    """
    A FastCGI Record.

    Used for encoding/decoding the 8-byte record header plus content and
    padding, as defined by the FastCGI 1.0 specification.
    """
    def __init__(self, typ=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = typ
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = ''
    def _recvall(sock, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)

        Returns (data, received_length); received_length may be short on EOF.
        """
        dataList = []
        recvLen = 0
        while length:
            try:
                data = sock.recv(length)
            except socket.error, e:
                # Non-blocking socket not ready: wait for readability, retry.
                if e[0] == errno.EAGAIN:
                    select.select([sock], [], [])
                    continue
                else:
                    raise
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        return ''.join(dataList), recvLen
    _recvall = staticmethod(_recvall)
    def read(self, sock):
        """Read and decode a Record from a socket.

        :raise EOFError: on any receive failure or short read.
        """
        try:
            header, length = self._recvall(sock, FCGI_HEADER_LEN)
        except:
            raise EOFError
        if length < FCGI_HEADER_LEN:
            raise EOFError
        self.version, self.type, self.requestId, self.contentLength, \
            self.paddingLength = struct.unpack(FCGI_Header, header)
        if __debug__:
            _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
                   'contentLength = %d' %
                   (sock.fileno(), self.type, self.requestId,
                    self.contentLength))
        if self.contentLength:
            try:
                self.contentData, length = self._recvall(sock,
                                                         self.contentLength)
            except:
                raise EOFError
            if length < self.contentLength:
                raise EOFError
        if self.paddingLength:
            # Padding bytes are read and discarded.
            try:
                self._recvall(sock, self.paddingLength)
            except:
                raise EOFError
    def _sendall(sock, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        length = len(data)
        while length:
            try:
                sent = sock.send(data)
            except socket.error, e:
                # Non-blocking socket full: wait for writability, retry.
                if e[0] == errno.EAGAIN:
                    select.select([], [sock], [])
                    continue
                else:
                    raise
            data = data[sent:]
            length -= sent
    _sendall = staticmethod(_sendall)
    def write(self, sock):
        """Encode and write a Record to a socket."""
        # Pad content up to the next 8-byte boundary.
        self.paddingLength = - self.contentLength & 7
        if __debug__:
            _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
                   'contentLength = %d' %
                   (sock.fileno(), self.type, self.requestId,
                    self.contentLength))
        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        self._sendall(sock, header)
        if self.contentLength:
            self._sendall(sock, self.contentData)
        if self.paddingLength:
            self._sendall(sock, '\x00' * self.paddingLength)
class FCGIApp(object):
    """Minimal FastCGI client: sends one request per connection and
    collects the response.

    Construct with either an explicit ``connect`` target (a UNIX socket
    path or a (host, port) tuple) or with ``host``/``port`` keywords.
    """
    def __init__(self, connect=None, host=None, port=None, filterEnviron=True):
        """
        connect -- UNIX socket path or (host, port) tuple
        host, port -- TCP target; when given they take precedence
        filterEnviron -- apply the CGI-style environ filter before sending
        """
        if port is not None:
            connect = (host, port)
        elif host is not None:
            # Host without a port: treated as the connect target directly.
            connect = host
        # BUGFIX: the original code ran "if port is None: connect = host"
        # unconditionally, which clobbered an explicitly supplied `connect`
        # argument with None whenever host/port were omitted, making the
        # `connect` keyword unusable.
        self._connect = connect
        self._filterEnviron = filterEnviron
    def __call__(self, environ, start_response=None):
        """Perform a single FastCGI request described by *environ*.

        Returns (status, headers, body, stderr_output).
        """
        # For sanity's sake, we don't care about FCGI_MPXS_CONN
        # (connection multiplexing). For every request, we obtain a new
        # transport socket, perform the request, then discard the socket.
        # This is, I believe, how mod_fastcgi does things...
        sock = self._getConnection()
        # Since this is going to be the only request on this connection,
        # set the request ID to 1.
        requestId = 1
        # Begin the request
        rec = Record(FCGI_BEGIN_REQUEST, requestId)
        rec.contentData = struct.pack(FCGI_BeginRequestBody, FCGI_RESPONDER, 0)
        rec.contentLength = FCGI_BeginRequestBody_LEN
        rec.write(sock)
        # Filter WSGI environ and send it as FCGI_PARAMS
        if self._filterEnviron:
            params = self._defaultFilterEnviron(environ)
        else:
            params = self._lightFilterEnviron(environ)
        # TODO: Anything not from environ that needs to be sent also?
        self._fcgiParams(sock, requestId, params)
        # Empty params record terminates the FCGI_PARAMS stream.
        self._fcgiParams(sock, requestId, {})
        # Transfer wsgi.input to FCGI_STDIN (no request body is sent here;
        # a single empty record terminates the stream immediately).
        #content_length = int(environ.get('CONTENT_LENGTH') or 0)
        s = ''
        while True:
            #chunk_size = min(content_length, 4096)
            #s = environ['wsgi.input'].read(chunk_size)
            #content_length -= len(s)
            rec = Record(FCGI_STDIN, requestId)
            rec.contentData = s
            rec.contentLength = len(s)
            rec.write(sock)
            if not s:
                break
        # Empty FCGI_DATA stream
        rec = Record(FCGI_DATA, requestId)
        rec.write(sock)
        # Main loop. Process FCGI_STDOUT, FCGI_STDERR, FCGI_END_REQUEST
        # records from the application.
        result = []
        err = ''
        while True:
            inrec = Record()
            inrec.read(sock)
            if inrec.type == FCGI_STDOUT:
                if inrec.contentData:
                    result.append(inrec.contentData)
                else:
                    # TODO: Should probably be pedantic and no longer
                    # accept FCGI_STDOUT records?"
                    pass
            elif inrec.type == FCGI_STDERR:
                # Simply forward to wsgi.errors
                err += inrec.contentData
                #environ['wsgi.errors'].write(inrec.contentData)
            elif inrec.type == FCGI_END_REQUEST:
                # TODO: Process appStatus/protocolStatus fields?
                break
        # Done with this transport socket, close it. (FCGI_KEEP_CONN was not
        # set in the FCGI_BEGIN_REQUEST record we sent above. So the
        # application is expected to do the same.)
        sock.close()
        result = ''.join(result)
        # Parse response headers from FCGI_STDOUT
        status = '200 OK'
        headers = []
        pos = 0
        while True:
            eolpos = result.find('\n', pos)
            if eolpos < 0:
                break
            line = result[pos:eolpos - 1]
            pos = eolpos + 1
            # strip in case of CR. NB: This will also strip other
            # whitespace...
            line = line.strip()
            # Empty line signifies end of headers
            if not line:
                break
            # TODO: Better error handling
            header, value = line.split(':', 1)
            header = header.strip().lower()
            value = value.strip()
            if header == 'status':
                # Special handling of Status header
                status = value
                if status.find(' ') < 0:
                    # Append a dummy reason phrase if one was not provided
                    status += ' FCGIApp'
            else:
                headers.append((header, value))
        result = result[pos:]
        # Set WSGI status, headers, and return result.
        #start_response(status, headers)
        #return [result]
        return status, headers, result, err
    def _getConnection(self):
        """Open and return a socket to the configured target.

        Exits the process with code 1 on timeout and 127 on connection
        failure (this module is used as a Nagios-style CLI probe).
        """
        if self._connect is not None:
            # The simple case. Create a socket and connect to the
            # application.
            if isinstance(self._connect, types.StringTypes):
                # String target: UNIX domain socket path.
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.settimeout(15)
                try:
                    sock.connect(self._connect)
                except socket.timeout:
                    sys.exit(1)
                except socket.error:
                    sys.exit(127)
            elif hasattr(socket, 'create_connection'):
                try:
                    sock = socket.create_connection(self._connect,15)
                except socket.timeout:
                    sys.exit(1)
                except socket.error:
                    sys.exit(127)
            else:
                # Fallback for ancient Pythons without create_connection.
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(15)
                try:
                    sock.connect(self._connect)
                except socket.timeout:
                    sys.exit(1)
                except socket.error:
                    sys.exit(127)
            return sock
        # To be done when I have more time...
        raise NotImplementedError(
            'Launching and managing FastCGI programs not yet implemented')
    def _fcgiGetValues(self, sock, vars): # @ReservedAssignment
        """Send FCGI_GET_VALUES for *vars* and return the result dict."""
        # Construct FCGI_GET_VALUES record
        outrec = Record(FCGI_GET_VALUES)
        data = []
        for name in vars:
            data.append(encode_pair(name, ''))
        data = ''.join(data)
        outrec.contentData = data
        outrec.contentLength = len(data)
        outrec.write(sock)
        # Await response
        inrec = Record()
        inrec.read(sock)
        result = {}
        if inrec.type == FCGI_GET_VALUES_RESULT:
            pos = 0
            while pos < inrec.contentLength:
                pos, (name, value) = decode_pair(inrec.contentData, pos)
                result[name] = value
        return result
    def _fcgiParams(self, sock, requestId, params):
        """Encode *params* as name/value pairs in one FCGI_PARAMS record."""
        #print params
        rec = Record(FCGI_PARAMS, requestId)
        data = []
        for name, value in params.items():
            data.append(encode_pair(name, value))
        data = ''.join(data)
        rec.contentData = data
        rec.contentLength = len(data)
        rec.write(sock)
    # Environ keys forwarded by the default filter (by prefix, exact copy,
    # or rename respectively).
    _environPrefixes = ['SERVER_', 'HTTP_', 'REQUEST_', 'REMOTE_', 'PATH_',
                        'CONTENT_', 'DOCUMENT_', 'SCRIPT_']
    _environCopies = ['SCRIPT_NAME', 'QUERY_STRING', 'AUTH_TYPE']
    _environRenames = {}
    def _defaultFilterEnviron(self, environ):
        """Keep only CGI-meaningful environ keys (prefix/copy/rename lists)."""
        result = {}
        for n in environ.keys():
            for p in self._environPrefixes:
                if n.startswith(p):
                    result[n] = environ[n]
            if n in self._environCopies:
                result[n] = environ[n]
            if n in self._environRenames:
                result[self._environRenames[n]] = environ[n]
        return result
    def _lightFilterEnviron(self, environ):
        """Keep only keys that are entirely upper-case."""
        result = {}
        for n in environ.keys():
            if n.upper() == n:
                result[n] = environ[n]
        return result
# Command line: fpm-stats.py <field> <host>:<port>    (3 args)
#           or: fpm-stats.py <host> <port> <field>    (4 args)
if len(sys.argv)==4:
    myhost = sys.argv[1]
    myport = int(sys.argv[2])
    # BUGFIX: the field name is the third argument; the original re-read
    # sys.argv[1] (the host) here and silently ignored sys.argv[3].
    field = sys.argv[3]
elif len(sys.argv)==3:
    try:
        (myhost, myport) = sys.argv[2].split(":")
    except ValueError:
        # No ":" in the target: host only, no port (UNIX socket path).
        (myhost, myport) = (sys.argv[2], None)
    field = sys.argv[1]
else:
    # Wrong usage: exit with a distinctive status for the caller.
    sys.exit(255)
fcgi = FCGIApp(host=myhost, port=myport)
env = {
| |
"""Contains the primary engine of Semver parsing, SemverThing. Supplies NotSemanticVersion exception.
"""
import re
# Regular expression for parsing semantic version text, taken from the
# semver 2.0 documentation (named groups: major, minor, patch, prerelease,
# buildmetadata).
re_semver = re.compile('^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$')
# Regular expression helpful for breaking 2 prerelease strings into
# symmetric parts (an alphabetic prefix and a numeric/dotted suffix).
re_prerelease = re.compile('^(?P<abc>[a-zA-Z]*)(?P<num>([-.\d]*)?)')
class NotSemanticVersion(Exception):
    "Raised when supplied string fails to parse into semantic version information during parse_semver_text."
    pass
def parse_semver_text(text):
    """Split a semantic version string into its named components.

    The returned dict has the keys: major, minor, patch, prerelease,
    buildmetadata (the optional ones may map to None).

    :param text: (str) version string such as "1.2.3-alpha+build"
    :return: match groups from the semver regular expression
    :rtype: dict
    :raises: NotSemanticVersion when *text* is not a valid semver string
    """
    match = re_semver.match(text)
    if match is None:
        raise NotSemanticVersion('Supplied text "{}" did not pass regular expression parsing.'.format(text))
    return match.groupdict()
def rec_cmp_releases(one, two):
    """Recursively compare two integer version lists for precedence.

    Both lists must be the same length and contain integers (a TypeError
    may propagate otherwise).

    :param one: list of ints (left-hand version)
    :param two: list of ints (right-hand version)
    :returns: 1 if *one* is later, 2 if *two* is later, 0 if equivalent
    :rtype: int
    """
    # All positions compared equal: the versions are equivalent.
    if not one:
        return 0
    head_one, head_two = one[0], two[0]
    if head_one != head_two:
        return 1 if head_one > head_two else 2
    # Heads tie; compare the remaining positions.
    return rec_cmp_releases(one[1:], two[1:])
def rec_cmp_prereleases(one, two):
    """Recursively compare two prerelease identifier lists for precedence.

    Returns 1 when the left argument (*one*) takes precedence, 2 when the
    right one (*two*) does, and 0 when they are identical.

    Only called when BOTH versions actually have a prerelease (the case of
    one side lacking a prerelease entirely is resolved in cmp_prerelease).

    :param one: list of identifier strings (left-hand prerelease)
    :param two: list of identifier strings (right-hand prerelease)
    :return: 0, 1, or 2
    :rtype: int
    """
    if one == two:
        return 0
    # One side has run out of identifiers: the longer prerelease wins.
    if not one:
        return 2
    if not two:
        return 1
    # Classify each head identifier as numeric or alphanumeric.
    sides = {}
    for key, token in ((1, one[0]), (2, two[0])):
        try:
            sides[key] = (True, int(token))
        except ValueError:
            sides[key] = (False, token)
    numeric_one, head_one = sides[1]
    numeric_two, head_two = sides[2]
    if numeric_one == numeric_two:
        # Same kind: equal heads recurse; otherwise compare by value
        # (integer comparison or ASCII string comparison respectively).
        if head_one == head_two:
            return rec_cmp_prereleases(one[1:], two[1:])
        return 1 if head_one > head_two else 2
    # Mixed kinds ("apples and oranges"): the alphanumeric side wins.
    return 2 if numeric_one else 1
def cmp_prerelease(sv1, sv2):
    """Compare the prerelease strings of two SemverThing objects.

    A version WITHOUT a prerelease takes precedence over one with a
    prerelease (per semver 2.0).

    Returns 1 when sv1 has precedence, 2 when sv2 does, 0 when equal.
    """
    if sv1.prerelease == sv2.prerelease:
        return 0
    # Exactly one side is missing a prerelease: that side wins precedence.
    if not sv1.prerelease:
        return 1
    if not sv2.prerelease:
        return 2
    # Both sides have prereleases. Standardize dashes to dots so that
    # forms like "alpha-11" and "alpha.2" compare field by field (with
    # numeric fields compared as numbers, not ASCII), then recurse over
    # the dot-separated identifiers.
    fields1 = sv1.prerelease.replace('-', '.').split('.')
    fields2 = sv2.prerelease.replace('-', '.').split('.')
    return rec_cmp_prereleases(fields1, fields2)
class SemverThing(object):
""" You can build a SemverThing in three ways:
1) Instantiate with a plain string, e.g. "1.2.3". The text variable will be parsed
through regular expressions to identify each part of the semver construction. For example:
sv = SemverThing("1.2.3-prerelease+build")
print(sv.build) # build
print(sv.major) # 1
print(sv.prerelease) # prerelease
print(sv.minor) # 2
print(sv.patch) # 3
2) Instantiate with keyword arguments, for example:
sv = SemverThing(major=1, minor=2, patch=3) #, prerelease, buildmetadata
3) Instantiate without arguments to create a blank slate, to which you can
assign values to its version attributes one at a time.
For Example:
sv = SemverThing()
sv.major = 1
sv.minor = 2
sv.patch = 3
print(sv) # 1.2.3
sv.buildmetadata = "somebuild"
print(sv) # 1.2.3+somebuild
sv.prerelease = "alpha"
print(sv) # 1.2.3-alpha+somebuild
Usage:
All arithmetic comparison operators are implemented on this object, so you can do:
sv1 = SemverThing('1.2.3-alpha')
sv2 = SemverThing('1.2.3')
print(sv1 > sv2) # False
print(sv1 != sv2) # True
print(sv2 > sv1) # True
NOTE that numerical version components (major, minor, patch) are converted to integers within
the object for ease of comparison.
To convert a SemverThing to a composed version string, simply use the python str operator::
print(sv1) # "1.2.3-alpha"
print("My version is %s" % sv2) # "My version is 1.2.3"
"""
    def __init__(self, text=None, **kwargs):
        """ text argument overrides use of kwargs.

        :param text: (str) semver string to parse, e.g. "1.2.3-alpha+build"
        :param kwargs: individual components (major, minor, patch,
                       prerelease, buildmetadata) used when *text* is absent
        :raises: NotSemanticVersion when *text* is not a parseable string
        """
        if text:
            try:
                kwargs = parse_semver_text(text)
            except TypeError:
                # attempt to create SemverThing using number or other nonsense.
                raise NotSemanticVersion('{} is not a valid string.'.format(text))
        self.major = kwargs.get('major', None)
        self.minor = kwargs.get('minor', None)
        self.patch = kwargs.get('patch', None)
        self.prerelease = kwargs.get('prerelease', '')
        self.buildmetadata = kwargs.get('buildmetadata', '')
    # MAGIC PROPERTIES for the numerical attributes (major/minor/patch):
    # 1) convert input to integer (raise ValueError if not convertible to int)
    # 2) allow setting properties to None without error.
    @property
    def major(self):
        return self._major
    @major.setter
    def major(self, value):
        if value is None:
            self._major = None
            return
        self._major = int(value)
    @property
    def minor(self):
        return self._minor
    @minor.setter
    def minor(self, value):
        if value is None:
            self._minor = None
            return
        self._minor = int(value)
    @property
    def patch(self):
        return self._patch
    @patch.setter
    def patch(self, value):
        if value is None:
            self._patch = None
            return
        self._patch = int(value)
    # COMPARISON OPERATOR DEFINITIONS:
    # The left-hand object in the statement is "self"; the right-hand is "other".
    # In cmp_* functions these map to one (1) and two (2) respectively:
    # cmp code 0 means equivalent, 1 means self wins, 2 means other wins.
    # <
    def __lt__(self, other):
        """Return True when *self* is an earlier version than *other*."""
        # Map cmp codes to the boolean answer for "<".
        conditions = {0: False,
                      1: False,
                      2: True
                      }
        result = rec_cmp_releases([self.major, self.minor, self.patch],
                                  [other.major, other.minor, other.patch])
        if result in (1,2):
            return conditions[result]
        # equivalence? drill down into prerelease.
        return conditions[cmp_prerelease(self, other)]
    # <=
    def __le__(self, other):
        """Return True when *self* is earlier than or equal to *other*."""
        if self.__eq__(other):
            return True
        if self.__lt__(other):
            return True
        return False
    # >
    def __gt__(self, other):
        """Return True when *self* is a later version than *other*."""
        # Map cmp codes to the boolean answer for ">".
        conditions = {0: False,
                      2: False,
                      1: True
                      }
        result = rec_cmp_releases([self.major, self.minor, self.patch],
                                  [other.major, other.minor, other.patch])
        if result in (1,2):
            return conditions[result]
        # equivalence? drill down into prerelease.
        return conditions[cmp_prerelease(self, other)]
    # >=
    def __ge__(self, other):
        """Return True when *self* is later than or equal to *other*."""
        if self.__eq__(other):
            return True
        if self.__gt__(other):
            return True
        return False
# ==
def __eq__(self, other):
conditions = {0: True,
1: False,
2: False,
}
# if the result is anything other than 0, these are not equivalent releases.
if rec_cmp_releases([self.major, self.minor, self.patch],
[other.major, other.minor, other.patch]):
| |
nxdata_logger.warning("Could not parse attr @signal=%s on "
"dataset %s as an int",
signal_attr, dsname)
continue
numbered_names.append((signal_number, dsname))
return [a[1] for a in sorted(numbered_names)]
@property
def auxiliary_signals_names(self):
"""List of names of the auxiliary signals.
Similar to :attr:`auxiliary_signals_dataset_names`, but the @long_name
is used when this attribute is present, instead of the dataset name.
"""
if not self.is_valid:
raise InvalidNXdataError("Unable to parse invalid NXdata")
signal_names = []
for asdn in self.auxiliary_signals_dataset_names:
if "long_name" in self.group[asdn].attrs:
signal_names.append(self.group[asdn].attrs["long_name"])
else:
signal_names.append(asdn)
return signal_names
@property
def auxiliary_signals(self):
"""List of all auxiliary signal datasets."""
if not self.is_valid:
raise InvalidNXdataError("Unable to parse invalid NXdata")
return [self.group[dsname] for dsname in self.auxiliary_signals_dataset_names]
@property
def interpretation(self):
"""*@interpretation* attribute associated with the *signal*
dataset of the NXdata group. ``None`` if no interpretation
attribute is present.
The *interpretation* attribute provides information about the last
dimensions of the signal. The allowed values are:
- *"scalar"*: 0-D data to be plotted
- *"spectrum"*: 1-D data to be plotted
- *"image"*: 2-D data to be plotted
- *"vertex"*: 3-D data to be plotted
For example, a 3-D signal with interpretation *"spectrum"* should be
considered to be a 2-D array of 1-D data. A 3-D signal with
interpretation *"image"* should be interpreted as a 1-D array (a list)
of 2-D images. An n-D array with interpretation *"image"* should be
interpreted as an (n-2)-D array of images.
A warning message is logged if the returned interpretation is not one
of the allowed values, but no error is raised and the unknown
interpretation is returned anyway.
"""
if not self.is_valid:
raise InvalidNXdataError("Unable to parse invalid NXdata")
allowed_interpretations = [None, "scalar", "spectrum", "image",
"rgba-image", # "hsla-image", "cmyk-image"
"vertex"]
interpretation = get_attr_as_unicode(self.signal, "interpretation")
if interpretation is None:
interpretation = get_attr_as_unicode(self.group, "interpretation")
if interpretation not in allowed_interpretations:
nxdata_logger.warning("Interpretation %s is not valid." % interpretation +
" Valid values: " + ", ".join(allowed_interpretations))
return interpretation
    @property
    def axes(self):
        """List of the axes datasets.

        The list typically has as many elements as there are dimensions in the
        signal dataset, the exception being scatter plots which use a 1D
        signal and multiple 1D axes of the same size.

        If an axis dataset applies to several dimensions of the signal, it
        will be repeated in the list.

        If a dimension of the signal has no dimension scale, `None` is
        inserted in its position in the list.

        .. note::

            The *@axes* attribute should define as many entries as there
            are dimensions in the signal, to avoid any ambiguity.
            If this is not the case, this implementation relies on the existence
            of an *@interpretation* (*spectrum* or *image*) attribute in the
            *signal* dataset.

        .. note::

            If an axis dataset defines attributes @first_good or @last_good,
            the output will be a numpy array resulting from slicing that
            axis (*axis[first_good:last_good + 1]*).

        :rtype: List[Dataset or 1D array or None]
        """
        if not self.is_valid:
            raise InvalidNXdataError("Unable to parse invalid NXdata")
        if self._axes is not None:
            # use cache (computed once per NXdata instance)
            return self._axes
        axes = []
        for axis_name in self.axes_dataset_names:
            if axis_name is None:
                # dimension without a dimension scale
                axes.append(None)
            else:
                axes.append(self.group[axis_name])
        # keep only good range of axis data: slicing turns the dataset
        # into an in-memory array for that entry
        for i, axis in enumerate(axes):
            if axis is None:
                continue
            if "first_good" not in axis.attrs and "last_good" not in axis.attrs:
                continue
            fg_idx = axis.attrs.get("first_good", 0)
            lg_idx = axis.attrs.get("last_good", len(axis) - 1)
            axes[i] = axis[fg_idx:lg_idx + 1]
        self._axes = axes
        return self._axes
    @property
    def axes_dataset_names(self):
        """List of axes dataset names.

        If an axis dataset applies to several dimensions of the signal, its
        name will be repeated in the list.

        If a dimension of the signal has no dimension scale (i.e. there is a
        "." in that position in the *@axes* array), `None` is inserted in the
        output list in its position.

        Three specification generations are supported, tried in order:
        @axes on the group (current), @axes on the signal dataset (older),
        and per-dataset @axis attributes (oldest).
        """
        if not self.is_valid:
            raise InvalidNXdataError("Unable to parse invalid NXdata")
        numbered_names = []     # used in case of @axis=0 (old spec)
        axes_dataset_names = get_attr_as_unicode(self.group, "axes")
        if axes_dataset_names is None:
            # try @axes on signal dataset (older NXdata specification)
            axes_dataset_names = get_attr_as_unicode(self.signal, "axes")
            if axes_dataset_names is not None:
                # we expect a comma separated string
                if hasattr(axes_dataset_names, "split"):
                    axes_dataset_names = axes_dataset_names.split(":")
            else:
                # try @axis on the individual datasets (oldest NXdata specification)
                for dsname in self.group:
                    if not is_dataset(self.group[dsname]):
                        continue
                    axis_attr = self.group[dsname].attrs.get("axis")
                    if axis_attr is not None:
                        try:
                            axis_num = int(axis_attr)
                        except (ValueError, TypeError):
                            nxdata_logger.warning("Could not interpret attr @axis as"
                                                  "int on dataset %s", dsname)
                            continue
                        numbered_names.append((axis_num, dsname))
        ndims = len(self.signal.shape)
        if axes_dataset_names is None:
            if numbered_names:
                # oldest spec: place each numbered axis at its dimension,
                # None for dimensions without an axis
                axes_dataset_names = []
                numbers = [a[0] for a in numbered_names]
                names = [a[1] for a in numbered_names]
                for i in range(ndims):
                    if i in numbers:
                        axes_dataset_names.append(names[numbers.index(i)])
                    else:
                        axes_dataset_names.append(None)
                return axes_dataset_names
            else:
                # no axis information at all
                return [None] * ndims
        if isinstance(axes_dataset_names, (six.text_type, six.binary_type)):
            # single name given for a 1-D signal
            axes_dataset_names = [axes_dataset_names]
        for i, axis_name in enumerate(axes_dataset_names):
            if hasattr(axis_name, "decode"):
                axis_name = axis_name.decode()
            if axis_name == ".":
                # "." is the NXdata placeholder for "no axis here"
                axes_dataset_names[i] = None
        if len(axes_dataset_names) != ndims:
            if self.is_scatter and ndims == 1:
                # case of a 1D signal with arbitrary number of axes
                return list(axes_dataset_names)
            if self.interpretation != "rgba-image":
                # @axes may only define 1 or 2 axes if @interpretation=spectrum/image.
                # Use the existing names for the last few dims, and prepend with Nones.
                assert len(axes_dataset_names) == INTERPDIM[self.interpretation]
                all_dimensions_names = [None] * (ndims - INTERPDIM[self.interpretation])
                for axis_name in axes_dataset_names:
                    all_dimensions_names.append(axis_name)
            else:
                # 2 axes applying to the first two dimensions.
                # The 3rd signal dimension is expected to contain 3(4) RGB(A) values.
                assert len(axes_dataset_names) == 2
                all_dimensions_names = [axn for axn in axes_dataset_names]
                all_dimensions_names.append(None)
            return all_dimensions_names
        return list(axes_dataset_names)
@property
def title(self):
"""Plot title. If not found, returns an empty string.
This attribute does not appear in the NXdata specification, but it is
implemented in *nexpy* as a dataset named "title" inside the NXdata
group. This dataset is expected to contain text.
Because the *nexpy* approach could cause a conflict if the signal
dataset or an axis dataset happened to be called "title", we also
support providing the title as an attribute of the NXdata group.
"""
if not self.is_valid:
raise InvalidNXdataError("Unable to parse invalid NXdata")
title = self.group.get("title")
data_dataset_names = [self.signal_name] + self.axes_dataset_names
if (title is not None and is_dataset(title) and
"title" not in data_dataset_names):
return str(title[()])
title = self.group.attrs.get("title")
if title is None:
return ""
return str(title)
def get_axis_errors(self, axis_name):
"""Return errors (uncertainties) associated with an axis.
If the axis has attributes @first_good or @last_good, the output
is trimmed accordingly (a numpy array will be returned rather than a
dataset).
:param str axis_name: Name of axis dataset. This dataset **must exist**.
:return: Dataset with axis errors, or None
:raise KeyError: if this group does not contain a dataset named axis_name
"""
if not self.is_valid:
raise InvalidNXdataError("Unable to parse invalid NXdata")
# ensure axis_name is decoded, before comparing it with decoded attributes
if hasattr(axis_name, "decode"):
axis_name = axis_name.decode("utf-8")
if axis_name not in self.group:
# tolerate axis_name given as @long_name
for item in self.group:
long_name = get_attr_as_unicode(self.group[item], "long_name")
if long_name is not None and long_name == axis_name:
axis_name = item
break
if axis_name not in self.group:
raise KeyError("group does not contain a dataset named '%s'" % axis_name)
len_axis = len(self.group[axis_name])
fg_idx = self.group[axis_name].attrs.get("first_good", 0)
lg_idx = self.group[axis_name].attrs.get("last_good", len_axis - 1)
# case of axisname_errors dataset present
errors_name = axis_name + "_errors"
if errors_name in self.group and is_dataset(self.group[errors_name]):
if fg_idx != 0 or lg_idx != (len_axis - 1):
return self.group[errors_name][fg_idx:lg_idx + 1]
else:
return self.group[errors_name]
# case of uncertainties dataset name provided in @uncertainties
uncertainties_names = get_attr_as_unicode(self.group, "uncertainties")
if uncertainties_names is None:
uncertainties_names = get_attr_as_unicode(self.signal, "uncertainties")
if isinstance(uncertainties_names, six.text_type):
uncertainties_names = [uncertainties_names]
if uncertainties_names is not None:
# take the uncertainty with the same index as the axis in @axes
axes_ds_names = get_attr_as_unicode(self.group, "axes")
if axes_ds_names is None:
axes_ds_names = get_attr_as_unicode(self.signal, "axes")
if isinstance(axes_ds_names, six.text_type):
axes_ds_names = [axes_ds_names]
elif isinstance(axes_ds_names, numpy.ndarray):
# transform numpy.ndarray into list
axes_ds_names = list(axes_ds_names)
assert isinstance(axes_ds_names, list)
if hasattr(axes_ds_names[0], "decode"):
axes_ds_names = [ax_name.decode("utf-8") for ax_name in axes_ds_names]
if axis_name not in axes_ds_names:
raise KeyError("group attr | |
# repo: OpenMPDK/SMDK
# This file is part of VoltDB.
# Copyright (C) 2008-2021 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'scooper'
import sys
import voltdbclient
from voltcli import cli
from voltcli import environment
from voltcli import utility
from voltcli import checkconfig
#===============================================================================
class BaseVerb(object):
#===============================================================================
    """
    Base class for verb implementations. Used by the @VOLT.Command decorator.

    A "verb" is one named sub-command of the CLI. This class owns the verb's
    CLI specification (options and positional arguments) and provides helpers
    to extend and validate that specification. Subclasses must override
    execute().
    """
    def __init__(self, name, **kwargs):
        self.name = name
        # Optional extra Java classpath for verbs that launch Java tools.
        self.classpath = utility.kwargs_get_string(kwargs, 'classpath', default = None)
        self.cli_spec = cli.CLISpec(**kwargs)
        # Dirty flags defer sorting/validation until iteration time.
        self.dirty_opts = False
        self.dirty_args = False
        self.command_arguments = utility.flatten_to_list(kwargs.get('command_arguments', None))
        utility.debug(str(self))
    def execute(self, runner):
        """Abstract hook: subclasses must implement the verb's action."""
        utility.abort('%s "%s" object does not implement the required execute() method.'
                            % (self.__class__.__name__, self.name))
    def add_options(self, *args):
        """
        Add options if not already present as an option or argument.
        """
        for o in args:
            dest_name = o.get_dest()
            if self.cli_spec.find_option(dest_name):
                utility.debug('Not adding "%s" option more than once.' % dest_name)
            else:
                self.cli_spec.add_to_list('options', o)
                self.dirty_opts = True
    def add_arguments(self, *args):
        """Append positional argument specifications to the CLI spec."""
        self.cli_spec.add_to_list('arguments', *args)
        self.dirty_args = True
    def get_attr(self, name, default = None):
        """Return a CLI spec attribute, or `default` when it is not set."""
        # Bug fix: `default` was previously accepted but never forwarded,
        # so callers always got None for missing attributes.
        return self.cli_spec.get_attr(name, default)
    def pop_attr(self, name, default = None):
        """Remove and return a CLI spec attribute, or `default` when unset."""
        # Bug fix: forward `default` instead of silently ignoring it.
        return self.cli_spec.pop_attr(name, default)
    def merge_java_options(self, name, *options):
        """Merge Java command-line options into spec attribute `name`."""
        return self.cli_spec.merge_java_options(name, *options)
    def set_defaults(self, **kwargs):
        """Apply default attribute values to the CLI spec."""
        return self.cli_spec.set_defaults(**kwargs)
    def __cmp__(self, other):
        # Python 2 ordering hook; Python 3 ignores __cmp__ entirely.
        return cmp(self.name, other.name)
    # Python 3 compatible comparisons: verbs sort and compare by name.
    # (cmp()/__cmp__ no longer exist in Python 3.)
    def __eq__(self, other):
        return self.name == other.name
    def __lt__(self, other):
        return self.name < other.name
    def __hash__(self):
        # Defining __eq__ would otherwise make the class unhashable in Py3.
        return hash(self.name)
    def __str__(self):
        return '%s: %s\n%s' % (self.__class__.__name__, self.name, self.cli_spec)
    def get_option_count(self):
        """Return the number of declared options (0 when none)."""
        if not self.cli_spec.options:
            return 0
        return len(self.cli_spec.options)
    def get_argument_count(self):
        """Return the number of declared positional arguments (0 when none)."""
        if not self.cli_spec.arguments:
            return 0
        return len(self.cli_spec.arguments)
    def iter_options(self):
        """Yield option specs, sorting first if new ones were added."""
        if self.cli_spec.options:
            self._check_options()
            for o in self.cli_spec.options:
                yield o
    def iter_arguments(self):
        """Yield argument specs, validating first if new ones were added."""
        if self.cli_spec.arguments:
            self._check_arguments()
            for a in self.cli_spec.arguments:
                yield a
    def _check_options(self):
        # Sort lazily, only when options were added since the last check.
        if self.dirty_opts:
            self.cli_spec.options.sort()
            self.dirty_opts = False
    def _check_arguments(self):
        if self.dirty_args:
            # Use a local function to sanity check an argument's min/max counts,
            # with an additional check applied to arguments other than the last
            # one since they cannot repeat or be missing.
            def check_argument(cli_spec_arg, is_last):
                # Bug fix: these messages referenced self.cli_spec_arg, which
                # does not exist and raised AttributeError instead of aborting
                # with a useful message.
                if cli_spec_arg.min_count < 0 or cli_spec_arg.max_count < 0:
                    utility.abort('%s argument (%s) has a negative min or max count declared.'
                                        % (self.name, cli_spec_arg.name))
                if cli_spec_arg.min_count == 0 and cli_spec_arg.max_count == 0:
                    utility.abort('%s argument (%s) has zero min and max counts declared.'
                                        % (self.name, cli_spec_arg.name))
                if not is_last and (cli_spec_arg.min_count != 1 or cli_spec_arg.max_count != 1):
                    utility.abort('%s argument (%s) is not the last argument, '
                                  'but has min/max counts declared.'
                                        % (self.name, cli_spec_arg.name))
            nargs = len(self.cli_spec.arguments)
            if nargs > 1:
                # Check all arguments except the last.
                for i in range(nargs-1):
                    check_argument(self.cli_spec.arguments[i], False)
            # Check the last argument.
            check_argument(self.cli_spec.arguments[-1], True)
            self.dirty_args = False
#===============================================================================
class CommandVerb(BaseVerb):
#===============================================================================
    """
    Verb that wraps a command function. Used by the @VOLT.Command decorator.

    Bundles attached to the verb get lifecycle callbacks: initialize() at
    construction, start()/stop() around execution, and optionally go() as
    the default action.
    """
    def __init__(self, name, function, **kwargs):
        BaseVerb.__init__(self, name, **kwargs)
        self.function = function
        self.bundles = utility.kwargs_get_list(kwargs, 'bundles')
        # Give each bundle a chance to adjust the CLI specification.
        for bundle in self.bundles:
            bundle.initialize(self)
        self.add_options(cli.BooleanOption(None, '--dry-run', 'dryrun', None))
    def execute(self, runner):
        # Start every bundle first, e.g. to create a client connection.
        for bundle in self.bundles:
            bundle.start(self, runner)
        try:
            # go() acts as the default implementation unless overridden.
            runner.set_default_func(self.go)
            self.function(runner)
        finally:
            # Stop bundles in reverse order of starting.
            for bundle in reversed(self.bundles):
                bundle.stop(self, runner)
    def go(self, runner):
        handled = False
        for bundle in self.bundles:
            if hasattr(bundle, 'go'):
                bundle.go(self, runner)
                handled = True
        if not handled:
            utility.abort('go() method is not implemented by any bundle or %s.'
                                % self.__class__.__name__)
#===============================================================================
class HelpVerb(CommandVerb):
#===============================================================================
    """
    Verb to provide standard help. Used by the @VOLT.Help decorator.
    """
    def __init__(self, name, function, **kwargs):
        CommandVerb.__init__(self, name, function, **kwargs)
        self.set_defaults(description = 'Display general or verb-specific help.', baseverb = True)
        # -a/--all expands the output to include per-verb usage details.
        all_option = cli.BooleanOption('-a', '--all', 'all',
                                       'display all available help, including verb usage')
        self.add_options(all_option)
        # Zero or more verb names may be supplied to narrow the help output.
        verb_argument = cli.StringArgument('verb', 'verb name', min_count = 0, max_count = None)
        self.add_arguments(verb_argument)
    def go(self, runner):
        runner.help(all = runner.opts.all, *runner.opts.verb)
#===============================================================================
class PackageVerb(CommandVerb):
#===============================================================================
    """
    Verb to create a runnable Python package. Used by @VOLT.Package decorator.
    """
    def __init__(self, name, function, **kwargs):
        CommandVerb.__init__(self, name, function, **kwargs)
        self.set_defaults(description = 'Create a runnable Python program package.',
                          baseverb = True,
                          hideverb = True,
                          description2 = '''
The optional NAME argument(s) allow package generation for base commands other
than the current one. If no NAME is provided the current base command is
packaged.''')
        # -f overwrites without prompting; -o redirects the output location.
        force_option = cli.BooleanOption('-f', '--force', 'force',
                                         'overwrite existing file without asking',
                                         default = False)
        outdir_option = cli.StringOption('-o', '--output_dir', 'output_dir',
                                         'specify the output directory (defaults to the working directory)')
        self.add_options(force_option, outdir_option)
        # Optional base command names; default is the current base command.
        name_argument = cli.StringArgument('name', 'base command name', min_count = 0, max_count = None)
        self.add_arguments(name_argument)
    def go(self, runner):
        opts = runner.opts
        runner.package(opts.output_dir, opts.force, *opts.name)
#===============================================================================
class Modifier(object):
#===============================================================================
    """
    Class for declaring multi-command modifiers.

    A modifier is one keyword variation of a MultiVerb (e.g. the "start" in
    "service start"), optionally taking trailing arguments.
    """
    def __init__(self, name, function, description, arg_name = ''):
        self.function = function
        self.name = name
        # Argument placeholder is displayed upper-case in usage text.
        self.arg_name = arg_name.upper()
        self.description = description
#===============================================================================
class MultiVerb(CommandVerb):
#===============================================================================
    """
    Verb to create multi-commands with modifiers and optional arguments.

    Each Modifier declares a "<verb> <modifier> [args...]" variation; go()
    dispatches to the matching modifier's function.
    """
    def __init__(self, name, function, **kwargs):
        CommandVerb.__init__(self, name, function, **kwargs)
        self.modifiers = utility.kwargs_get_list(kwargs, 'modifiers', default = [])
        if not self.modifiers:
            utility.abort('Multi-command "%s" must provide a "modifiers" list.' % self.name)
        valid_modifiers = '|'.join([mod.name for mod in self.modifiers])
        has_args = 0
        rows = []
        # Build a usage table describing each command variation for help text.
        for mod in self.modifiers:
            if mod.arg_name:
                usage = '%s %s [ %s ... ]' % (self.name, mod.name, mod.arg_name)
                has_args += 1
            else:
                usage = '%s %s' % (self.name, mod.name)
            rows.append((usage, mod.description))
        caption = '"%s" Command Variations' % self.name
        other_info = utility.format_table(rows, caption = caption, separator = '  ')
        self.set_defaults(other_info = other_info.strip())
        args = [
            cli.StringArgument('modifier',
                               'command modifier (valid modifiers: %s)' % valid_modifiers)]
        if has_args > 0:
            # Typo fix: "argument(s)", not "arguments(s)".
            if has_args == len(self.modifiers):
                arg_desc = 'optional argument(s)'
            else:
                arg_desc = 'optional argument(s) (where applicable)'
            args.append(cli.StringArgument('arg', arg_desc, min_count = 0, max_count = None))
        self.add_arguments(*args)
    def go(self, runner):
        # Dispatch on the first positional argument (case-insensitive).
        mod_name = runner.opts.modifier.lower()
        for mod in self.modifiers:
            if mod.name == mod_name:
                mod.function(runner)
                break
        else:
            # No modifier matched: report the valid choices and show help.
            utility.error('Invalid "%s" modifier "%s". Valid modifiers are listed below:'
                                % (self.name, mod_name),
                          [mod.name for mod in self.modifiers])
            runner.help(self.name)
#===============================================================================
class VerbDecorators(object):
#===============================================================================
    """
    Provide decorators used by command implementations to declare commands.
    NB: All decorators assume they are being called. E.g. @VOLT.Command() is
    valid, but @VOLT.Command is not, even though Python won't catch the latter
    as a compile-time error.
    """
    def __init__(self, verbs):
        """
        Constructor. The verbs argument is the dictionary to populate with
        discovered Verb objects. It maps verb name to Verb object.
        """
        self.verbs = verbs
    def _add_verb(self, verb):
        # Register a verb, aborting on duplicate declarations.
        if verb.name in self.verbs:
            utility.abort('Verb "%s" is declared more than once.' % verb.name)
        self.verbs[verb.name] = verb
    def _get_decorator(self, verb_class, *args, **kwargs):
        """
        Build a decorator that wraps a function in verb_class and registers it.
        args/kwargs are the decorator's own arguments (passed to verb_class),
        not the wrapped call's.
        """
        def inner_decorator(function):
            # Renamed wrapper parameters so they no longer shadow the
            # decorator's own args/kwargs from the enclosing scope.
            def wrapper(*call_args, **call_kwargs):
                # Bug fix: propagate the wrapped function's return value
                # instead of silently discarding it.
                return function(*call_args, **call_kwargs)
            verb = verb_class(function.__name__, wrapper, *args, **kwargs)
            self._add_verb(verb)
            return wrapper
        return inner_decorator
    def Command(self, *args, **kwargs):
        """
        @VOLT.Command decorator for declaring general-purpose commands.
        """
        return self._get_decorator(CommandVerb, *args, **kwargs)
    def Help(self, *args, **kwargs):
        """
        @VOLT.Help decorator for declaring help commands.
        """
        return self._get_decorator(HelpVerb, *args, **kwargs)
    def Package(self, *args, **kwargs):
        """
        @VOLT.Package decorator for declaring commands for CLI packaging.
        """
        return self._get_decorator(PackageVerb, *args, **kwargs)
    def Multi_Command(self, *args, **kwargs):
        """
        @VOLT.Multi decorator for declaring "<verb> <tag>" commands.
        """
        return self._get_decorator(MultiVerb, *args, **kwargs)
#===============================================================================
class VerbSpace(object):
#===============================================================================
    """
    Manages a collection of Verb objects that support a particular CLI interface.
    """
    def __init__(self, name, version, description, VOLT, scan_dirs, verbs, pro_version):
        self.name = name
        self.version = version
        self.pro_version = pro_version
        self.description = description.strip()
        self.VOLT = VOLT
        self.scan_dirs = scan_dirs
        self.verbs = verbs
        # Cache verb names in sorted order for deterministic display.
        self.verb_names = sorted(self.verbs)
#===============================================================================
class JavaBundle(object):
#===============================================================================
"""
Verb that wraps a function that calls into a Java class. Used by
the @VOLT.Java decorator.
"""
def __init__(self, java_class):
self.java_class = java_class
def initialize(self, verb):
verb.add_options(
cli.StringOption(None, '--client', 'clientport', 'specify the client port as [ipaddress:]port-number'),
cli.StringOption(None, '--internal', 'internalport', 'specify the internal port as [ipaddress:]port-number used to communicate between cluster nodes'),
cli.StringOption(None, '--zookeeper', 'zkport', 'specify the zookeeper port as [ipaddress:]port-number'),
cli.StringOption(None, '--drpublic', 'drpublic', 'Specifies the interface (ipaddress[:port-number]) advertised to consumer clusters as the DR interface, used in hosted environments where internal interfaces are not | |
body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(InlineResponse2006, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['id'] = \
id
return self.portfolio_position_list_get_endpoint.call_with_http_info(**kwargs)
def portfolio_transaction_cash_create_post(
self,
**kwargs
) -> InlineResponse2011:
"""Add a cash transaction to a portfolio. # noqa: E501
Add a cash transaction to a portfolio. Certain error conditions yield errors as follows: |Error Condition|HTTP Error| |-------|--------| |The number of transactions would exceed 1000.|400 Bad Request| # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Keyword Args:
body (InlineObject7): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
InlineResponse2011
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
return self.portfolio_transaction_cash_create_post_endpoint.call_with_http_info(**kwargs)
def portfolio_transaction_cash_create_post_with_http_info(
self,
**kwargs
) -> typing.Tuple[InlineResponse2011, int, typing.MutableMapping]:
"""Add a cash transaction to a portfolio. # noqa: E501
Add a cash transaction to a portfolio. Certain error conditions yield errors as follows: |Error Condition|HTTP Error| |-------|--------| |The number of transactions would exceed 1000.|400 Bad Request| # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Keyword Args:
body (InlineObject7): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
InlineResponse2011
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
return self.portfolio_transaction_cash_create_post_endpoint.call_with_http_info(**kwargs)
def portfolio_transaction_cash_create_post_async(
self,
**kwargs
) -> "ApplyResult[InlineResponse2011]":
"""Add a cash transaction to a portfolio. # noqa: E501
Add a cash transaction to a portfolio. Certain error conditions yield errors as follows: |Error Condition|HTTP Error| |-------|--------| |The number of transactions would exceed 1000.|400 Bad Request| # noqa: E501
This method makes a asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Keyword Args:
body (InlineObject7): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[InlineResponse2011]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
return self.portfolio_transaction_cash_create_post_endpoint.call_with_http_info(**kwargs)
def portfolio_transaction_cash_create_post_with_http_info_async(
self,
**kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2011, int, typing.MutableMapping]]":
"""Add a cash transaction to a portfolio. # noqa: E501
Add a cash transaction to a portfolio. Certain error conditions yield errors as follows: |Error Condition|HTTP Error| |-------|--------| |The number of transactions would exceed 1000.|400 Bad Request| # noqa: E501
This method makes a asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Keyword Args:
body (InlineObject7): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(InlineResponse2011, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
return self.portfolio_transaction_cash_create_post_endpoint.call_with_http_info(**kwargs)
def portfolio_transaction_cash_delete_post(
self,
**kwargs
) -> InlineResponse2007:
"""Delete a cash transaction. # noqa: E501
Delete a cash transaction. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Keyword Args:
body (InlineObject8): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
InlineResponse2007
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
return self.portfolio_transaction_cash_delete_post_endpoint.call_with_http_info(**kwargs)
def portfolio_transaction_cash_delete_post_with_http_info(
self,
**kwargs
) -> typing.Tuple[InlineResponse2007, int, typing.MutableMapping]:
"""Delete a cash transaction. # noqa: E501
Delete a cash transaction. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Keyword Args:
body (InlineObject8): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will | |
<reponame>gallantlab/tikreg
from tikreg.models import * # TODO:
from tikreg.models import _ols, _generalized_tikhonov_dual
import tikreg.utils as tikutils
def test_kernel_kron():
    """kernel_spatiotemporal_prior must equal the explicit X.Sigma.X' product
    where Sigma is the Kronecker product of temporal and spatial priors."""
    n, p, d = 20, 10, 5
    delays = range(d)
    Xtrain = np.random.randn(n, p)
    Xtest = np.random.randn(int(n / 2), p)
    # Build PSD spatial and temporal priors from random square roots.
    root_x = np.random.randn(p, p)
    root_t = np.random.randn(d, d)
    sigma_x = root_x.T.dot(root_x)
    sigma_t = root_t.T.dot(root_t)
    sigma = np.kron(sigma_t, sigma_x)
    Xtrn = tikutils.delay_signal(Xtrain, delays)
    Xtst = tikutils.delay_signal(Xtest, delays)
    # Training kernel.
    expected_trn = np.linalg.multi_dot([Xtrn, sigma, Xtrn.T])
    K = kernel_spatiotemporal_prior(Xtrain, sigma_t, sigma_x, delays=delays)
    assert np.allclose(expected_trn, K)
    assert np.allclose(np.corrcoef(expected_trn.ravel(), K.ravel())[0, 1], 1)
    # Test-versus-train kernel.
    expected_tst = np.linalg.multi_dot([Xtst, sigma, Xtrn.T])
    K = kernel_spatiotemporal_prior(Xtrain, sigma_t, sigma_x, Xtest=Xtest, delays=delays)
    assert np.allclose(expected_tst, K)
    assert np.allclose(np.corrcoef(expected_tst.ravel(), K.ravel())[0, 1], 1)
def test_kernel_banded_temporal():
    """The banded temporal kernel must match the full spatiotemporal kernel
    when the spatial prior is spherical (a scaled identity)."""
    A = np.random.randn(10, 10)
    B = np.random.randn(20, 10)
    ridge_scale = 3.0**2
    STS = np.eye(10) * ridge_scale
    T = np.random.randn(5, 5)
    TTT = T.T.dot(T)
    delays = range(5)
    # Training kernel comparison.
    full = kernel_spatiotemporal_prior(A, TTT, STS,
                                       delays=delays)
    banded = kernel_banded_temporal_prior(A.dot(A.T), TTT, ridge_scale,
                                          delays=delays)
    assert np.allclose(full, banded)
    # Test kernel comparison.
    full = kernel_spatiotemporal_prior(A, TTT, STS, Xtest=B,
                                       delays=delays)
    banded = kernel_banded_temporal_prior(B.dot(A.T), TTT, ridge_scale,
                                          delays=delays)
    assert np.allclose(full, banded)
def test_ols():
    """Both the public and private OLS solvers recover noiseless weights."""
    B, X, Y = tikutils.generate_data(noise=0, dozscore=False)
    for solver in (ols, _ols):
        assert np.allclose(solver(X, Y), B)
def test_olspred():
    """olspred's implicit predictions must match explicit OLS predictions."""
    B, (Xtrn, Xtest), (Ytrn, Ytest) = tikutils.generate_data(noise=0, testsize=20, dozscore=False)
    weights = ols(Xtrn, Ytrn)
    explicit = np.dot(Xtest, weights)
    implicit = olspred(Xtrn, Ytrn, Xtest=Xtest)
    assert np.allclose(implicit, explicit)
    # With no noise, within-set predictions reproduce the training responses.
    assert np.allclose(olspred(Xtrn, Ytrn), Ytrn)
def test_solve_l2_primal():
    """solve_l2_primal must match the direct ridge solution, degenerate to OLS
    at zero regularization, honor its output-selection keywords, and produce
    consistent predictions and performance values."""
    ridges = [0.0, 10.0, 100.0, 1000.0]
    test_idx = 1
    B, (Xtrn, Xtest), (Ytrn, Ytest) = tikutils.generate_data(n=100, p=20,
                                                             noise=0, testsize=20, dozscore=False)
    # Direct closed-form solution at the probed ridge value.
    Bhat_direct = simple_ridge_primal(Xtrn, Ytrn, ridge=ridges[test_idx]**2)
    fit = solve_l2_primal(Xtrn, Ytrn, Xtest=Xtest, Ytest=zscore(Ytest),
                          ridges=ridges, verbose=False, EPS=0,  # no EPS threshold
                          weights=True, predictions=False, performance=False)
    assert np.allclose(fit['weights'][test_idx], Bhat_direct)
    # Zero regularization degenerates to OLS.
    assert np.allclose(ols(Xtrn, Ytrn), fit['weights'][0])
    # Keyword switches control which result fields are returned.
    fit = solve_l2_primal(Xtrn, Ytrn, Xtest=Xtest, Ytest=zscore(Ytest),
                          ridges=ridges, verbose=False, EPS=0,  # no EPS threshold
                          weights=False, predictions=True, performance=True)
    assert ('predictions' in fit) and ('performance' in fit) and ('weights' not in fit)
    # Predictions agree with the explicit matrix product.
    Yhat_direct = np.dot(Xtest, Bhat_direct)
    assert np.allclose(fit['predictions'][test_idx], Yhat_direct)
    # Performance agrees with explicitly computed correlations.
    cc_direct = tikutils.columnwise_correlation(Yhat_direct, Ytest)
    assert np.allclose(cc_direct, fit['performance'][test_idx])
def test_solve_l2_dual():
    """Dual (kernel-space) ridge must agree with the direct dual solution,
    degenerate to OLS at zero regularization, match the primal solver, and
    recover dual weights exactly under several non-linear kernels.

    NOTE: random draws are order-dependent; do not reorder statements.
    """
    ridges = [0.0, 10.0, 100.0, 1000.0]
    ridge_test = 2
    # get some data
    B, (Xtrn, Xtest), (Ytrn, Ytest) = tikutils.generate_data(n=100, p=20,
                                                             noise=0, testsize=20, dozscore=False)
    # get direct solution
    Bhat_direct = simple_ridge_dual(Xtrn, Ytrn, ridge=ridges[ridge_test]**2)
    # Linear kernels: gram matrices of the training/test design matrices.
    Ktrn = np.dot(Xtrn, Xtrn.T)
    Ktest = np.dot(Xtest, Xtrn.T)
    fit = solve_l2_dual(Ktrn, Ytrn, Ktest=Ktest, Ytest=zscore(Ytest),
                        ridges=ridges, verbose=False, EPS=0, # NO EPS threshold
                        weights=True, predictions=False, performance=False)
    # project to linear space (dual weights -> primal weights via X.T)
    Bhat_indirect = np.tensordot(Xtrn.T, fit['weights'], (1,1)).swapaxes(0,1)
    assert np.allclose(Bhat_indirect[ridge_test], Bhat_direct)
    # check we can get OLS
    Bols = ols(Xtrn, Ytrn)
    # project to linear space
    Bhat_indirect_ols = np.dot(Xtrn.T, fit['weights'][0])
    assert np.allclose(Bols, Bhat_indirect_ols)
    # test keyword arguments work as expected
    fit = solve_l2_dual(Ktrn, Ytrn, Ktest=Ktest, Ytest=zscore(Ytest),
                        ridges=ridges, verbose=False, EPS=0, # NO EPS threshold
                        weights=False, predictions=True, performance=True)
    assert ('predictions' in fit) and ('performance' in fit) and ('weights' not in fit)
    # check predictions
    Yhat_direct = np.dot(Xtest, Bhat_direct)
    Yhat_indirect = fit['predictions']
    assert np.allclose(Yhat_indirect[ridge_test], Yhat_direct)
    # check performance
    cc_direct = tikutils.columnwise_correlation(Yhat_direct, Ytest)
    cc_indirect = fit['performance']
    assert np.allclose(cc_direct, cc_indirect[ridge_test])
    # compare against primal representation
    fit_primal = solve_l2_primal(Xtrn, Ytrn, Xtest=Xtest, Ytest=zscore(Ytest),
                                 ridges=ridges, verbose=False, EPS=0, # NO EPS threshold
                                 weights=True, predictions=False, performance=False)
    Bhat_primal = fit_primal['weights']
    assert np.allclose(Bhat_primal, Bhat_indirect)
    # test non-linear kernel
    kernels_to_test = ['gaussian', 'ihpolykern', 'hpolykern', 'multiquad']
    kernel_params_to_test = [10., 3., 2., 20.]
    ridges = [0] # No regularization
    for kernel_name, kernel_param in zip(kernels_to_test, kernel_params_to_test):
        # Construct responses directly from the kernel so the known dual
        # weights (rlambdas) are exactly recoverable at zero regularization.
        lzk = lazy_kernel(Xtrn, kernel_type=kernel_name)
        lzk.update(kernel_param)
        rlambdas = zscore(np.random.randn(Xtrn.shape[0], 20))
        Y = np.dot(lzk.kernel, rlambdas)
        # NB: multiquad kernel produces negative eigen-values! This means that
        # thresholding the eigen-values to be positive (EPS > 0) will lead to
        # inperfect weight recovery. For this reason, the test uses EPS=None.
        EPS = None if kernel_name == 'multiquad' else 0
        fit = solve_l2_dual(lzk.kernel, Y,
                            ridges=ridges, verbose=False, EPS=EPS,
                            weights=True, predictions=False, performance=False)
        assert np.allclose(rlambdas, fit['weights'].squeeze())
def test_cvridge():
    """Exercise cvridge end-to-end: single-voxel and single-ridge edge cases,
    prediction consistency, user-supplied folds, and non-linear kernel
    cross-validation with exact dual-weight recovery.

    NOTE: np.random draw order determines every value below; do not reorder
    statements. The explicit seed makes the non-linear kernel section
    reproducible.
    """
    ridges = np.logspace(1,3,10)
    voxel = 20
    ridge = 5
    ps = [50, 100]
    ns = [100, 50]
    # test primal and dual
    for N, P in zip(ns, ps):
        # get fake data
        B, (Xt, Xv), (Yt, Yv) = tikutils.generate_data(n=N, p=P, testsize=30, v=100, noise=2.0)
        # Check all works for 1 voxel case
        fit = cvridge(Xt, Yt[:,voxel].squeeze(),
                      Xtest=Xv, Ytest=Yv[:, voxel].squeeze(),
                      ridges=ridges, kernel_name='linear',
                      kernel_params=None, folds='cv', nfolds=5, blocklen=5,
                      verbose=False, EPS=0, withinset_test=False,
                      performance=True, predictions=True, weights=True)
        cvres = fit['cvresults']
        # Best ridge = argmax of mean cross-validated performance.
        optidx = np.argmax(cvres.squeeze().mean(0))
        optridge = ridges[optidx]
        B = simple_ridge_primal(Xt, Yt, ridge=optridge**2)
        assert np.allclose(fit['weights'].squeeze(), B[:, voxel])
        # check all works for 1 ridge case
        fit = cvridge(Xt, Yt,
                      Xtest=Xv, Ytest=Yv,
                      ridges=[ridges[ridge]], kernel_name='linear',
                      kernel_params=None, folds='cv', nfolds=5, blocklen=5,
                      verbose=False, EPS=0, withinset_test=False,
                      performance=True, predictions=True, weights=True)
        cvres = fit['cvresults']
        B = simple_ridge_primal(Xt, Yt, ridge=ridges[ridge]**2)
        assert np.allclose(fit['weights'].squeeze(), B)
        # one ridge, one voxel
        fit = cvridge(Xt, Yt[:,voxel].squeeze(),
                      Xtest=Xv, Ytest=Yv[:, voxel].squeeze(),
                      ridges=[ridges[ridge]], kernel_name='linear',
                      kernel_params=None, folds='cv', nfolds=5, blocklen=5,
                      verbose=False, EPS=0, withinset_test=False,
                      performance=True, predictions=True, weights=True)
        cvres = fit['cvresults']
        B = simple_ridge_primal(Xt, Yt, ridge=ridges[ridge]**2)
        assert np.allclose(fit['weights'].squeeze(), B[:, voxel])
        # check predictions work
        fit = cvridge(Xt, Yt,
                      Xtest=Xv, Ytest=Yv,
                      ridges=ridges, kernel_name='linear',
                      kernel_params=None, folds='cv', nfolds=5, blocklen=5,
                      verbose=False, EPS=0, withinset_test=False,
                      performance=True, predictions=True, weights=True)
        cvres = fit['cvresults']
        # Mean over folds, then over voxels, to pick the global optimum.
        optidx = np.argmax(cvres.squeeze().mean(0).mean(-1))
        optridge = ridges[optidx]
        B = simple_ridge_primal(Xt, Yt, ridge=optridge**2)
        assert np.allclose(fit['weights'], B)
        # test cv results
        # Hand-built folds: (train indices, validation indices) pairs.
        folds = [(np.arange(10,N), np.arange(10)),
                 (np.arange(20,N), np.arange(20)),
                 (np.arange(30,N), np.arange(30)),
                 ]
        fit = cvridge(Xt, Yt,
                      Xtest=Xv, Ytest=Yv,
                      ridges=ridges, kernel_name='linear',
                      kernel_params=None, folds=folds, nfolds=5, blocklen=5,
                      verbose=False, EPS=0, withinset_test=False,
                      performance=True, predictions=True, weights=True)
        cvres = fit['cvresults']
        for fdx in range(len(folds)):
            # compute the fold prediction performance
            B = simple_ridge_primal(Xt[folds[fdx][0]],
                                    Yt[folds[fdx][0]],
                                    ridge=ridges[ridge]**2)
            Yhat = np.dot(Xt[folds[fdx][1]], B)
            cc = tikutils.columnwise_correlation(Yhat, Yt[folds[fdx][1]])
            assert np.allclose(cc, cvres[fdx,0,ridge])
    # test non-linear kernel CV
    Ns = [100, 50]
    Ps = [50, 100]
    # NOTE(review): LA is imported in function scope here, but the (truncated)
    # test_generalized_tikhonov below also references LA — verify it has its
    # own import, otherwise it raises NameError when run in isolation.
    from scipy import linalg as LA
    np.random.seed(8)
    for N, P in zip(Ns, Ps):
        B, (Xtrn, Xtest), (Ytrn, Ytest) = tikutils.generate_data(n=N, p=P,
                                                                 noise=0, testsize=20,
                                                                 dozscore=False)
        # test non-linear kernel
        kernels_to_test = ['gaussian', 'ihpolykern', 'hpolykern', 'multiquad']
        kernel_params = [10., 3., 2., 100.]
        ridges = [0.0]
        for kernel_name, kernel_param in zip(kernels_to_test, kernel_params):
            # Build responses directly from the kernel so the known dual
            # weights (rlambdas) are exactly recoverable.
            lzk = lazy_kernel(Xtrn, kernel_type=kernel_name)
            lzk.update(kernel_param)
            rlambdas = zscore(np.random.randn(Xtrn.shape[0], 20))
            Y = np.dot(lzk.kernel, rlambdas)
            # NB: multiquad kernel produces negative eigen-values! This means that
            # thresholding the eigen-values to be positive (EPS > 0) will lead to
            # inperfect weight recovery. For this reason, the test uses EPS=None.
            EPS = None if kernel_name == 'multiquad' else 0
            fit = cvridge(Xtrn, Y,
                          ridges=ridges,
                          kernel_name=kernel_name, kernel_params=kernel_params,
                          folds='cv', nfolds=5, blocklen=5, trainpct=0.8,
                          verbose=True, EPS=EPS,
                          weights=True, predictions=False, performance=False)
            cvres = fit['cvresults']
            surface = np.nan_to_num(cvres.mean(0)).mean(-1)
            # find the best point in the 2D space
            max_point = np.where(surface.max() == surface)
            # make sure it's unique (conservative-ish biggest ridge/parameter)
            # NOTE(review): map() returns an iterator on Python 3; unpacking
            # two values below works, but indexing max_point would not.
            max_point = map(max, max_point)
            # The maximum point
            kernmax, ridgemax = max_point
            kernopt, ridgeopt = kernel_params[kernmax], ridges[ridgemax]
            # Solve explicitly
            lzk.update(kernopt)
            L, Q = LA.eigh(lzk.kernel)
            rlambda_hat = np.dot(np.dot(Q, np.diag(1.0/L)), np.dot(Q.T, Y))
            assert np.allclose(rlambda_hat, fit['weights'].squeeze())
            if N > P:
                # N < P cross-testidation will not always work in recovering the true
                # kernel parameter because similar kernel parameters yield close to
                # optimal answers in the folds
                # NB: gaussian kernel doesn't always pass this test because
                # the optimal kernel parameter is not always found.
                # the np.seed fixes this.
                assert np.allclose(rlambdas, fit['weights'].squeeze())
def test_generalized_tikhonov():
Ns = [100, 50]
Ps = [50, 100]
for N, p in zip(Ns, Ps):
B, (X, Xtest), (Y, Ytest) = tikutils.generate_data(n=N, p=p, testsize=30)
Ytest = zscore(Ytest)
L = np.random.randint(0, 100, (p,p))
Li = LA.inv(L)
ridge = 10.0
direct = simple_generalized_tikhonov(X, Y, L, ridge=ridge**2)
stdform = generalized_tikhonov(X, Y, Li, ridge=ridge**2)
stdform_dual = _generalized_tikhonov_dual(X, Y, Li, ridge=ridge**2)
assert np.allclose(direct, stdform)
assert np.allclose(direct, stdform_dual)
# compute predictions and performance
Yhat = np.dot(Xtest, direct)
cc = tikutils.columnwise_correlation(Yhat, Ytest)
# use standard machinery
Atrn = np.dot(X, Li)
Atest = np.dot(Xtest, Li)
fit = solve_l2_primal(Atrn, Y, Atest, Ytest=Ytest,
ridges=[ridge], performance=True,
weights=True, predictions=True)
W = np.dot(Li, fit['weights'].squeeze())
assert np.allclose(W, direct)
assert np.allclose(fit['predictions'], Yhat)
assert np.allclose(fit['performance'], cc)
# use standard machinery dual
Atrn = np.dot(X, Li)
Atest = np.dot(Xtest, Li)
Ktrn = np.dot(Atrn, Atrn.T)
Ktest = np.dot(Atest, Atrn.T)
fit = | |
# -----------------------------------------------------------
# hexalattice module creates and prints hexagonal lattices
#
# (C) 2020 <NAME>,
# Released under MIT License
# email <EMAIL>
# Full documentation: https://github.com/alexkaz2/hexalattice
# -----------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
from typing import List, Union
def create_hex_grid(nx: int = 4,
                    ny: int = 5,
                    min_diam: float = 1.,
                    n: int = 0,
                    align_to_origin: bool = True,
                    face_color: Union[List[float], str] = None,
                    edge_color: Union[List[float], str] = None,
                    plotting_gap: float = 0.,
                    crop_circ: float = 0.,
                    do_plot: bool = False,
                    rotate_deg: float = 0.,
                    keep_x_sym: bool = True,
                    h_ax: plt.Axes = None) -> (np.ndarray, plt.Axes):
    """
    Create (and optionally draw) a hexagonal lattice.

    :param nx: Number of horizontal hexagons in the rectangular grid, [nx * ny]
    :param ny: Number of vertical hexagons in the rectangular grid, [nx * ny]
    :param min_diam: Minimal diameter of each hexagon
    :param n: Alternative way to size the grid; the final grid may contain fewer hexagons
    :param align_to_origin: Shift the grid so the central tile is centered at the origin
    :param face_color: RGB triplet, valid abbreviation (e.g. 'k') or RGB+alpha
    :param edge_color: RGB triplet, valid abbreviation (e.g. 'k') or RGB+alpha
    :param plotting_gap: Gap between edges of adjacent tiles, as a fraction of min_diam
    :param crop_circ: Disabled if 0. If >0, keep only tiles within radius r=crop_circ
    :param do_plot: Draw the lattice. If h_ax is not provided a new figure is opened.
    :param rotate_deg: Rotate the grid about the central tile center, in degrees
    :param keep_x_sym: NOT YET IMPLEMENTED
    :param h_ax: Axes handle to draw into; when None a new figure is opened.
    :return: (N, 2) array of tile-center coordinates, and the axes handle (or None)
    """
    # Validate arguments before doing any work; bail out on the first problem.
    if not check_inputs(nx, ny, min_diam, n, align_to_origin, face_color, edge_color,
                        plotting_gap, crop_circ, do_plot, rotate_deg, keep_x_sym):
        print('Aborting hexagonal grid creation...')
        exit()

    coord_x, coord_y = make_grid(nx, ny, min_diam, n, crop_circ, rotate_deg, align_to_origin)

    if do_plot:
        h_ax = plot_single_lattice(coord_x, coord_y, face_color, edge_color,
                                   min_diam, plotting_gap, rotate_deg, h_ax)

    # Columns are stacked side by side: column 0 = x, column 1 = y.
    return np.hstack([coord_x, coord_y]), h_ax
def check_inputs(nx, ny, min_diam, n, align_to_origin, face_color, edge_color, plotting_gap, crop_circ, do_plot,
                 rotate_deg, keep_x_sym):
    """
    Validate input types, ranges and co-compatibility of the create_hex_grid() arguments.

    Prints a diagnostic message for every violated constraint.

    :return: bool - True when all arguments pass validation, False otherwise
    """
    args_are_valid = True

    # BUGFIX: the range check previously tested `nx < 0` three times, so
    # negative ny or n silently passed validation.
    if (not isinstance(nx, (int, float))) or (not isinstance(ny, (int, float))) or (not isinstance(n, (int, float))) \
            or (nx < 0) or (ny < 0) or (n < 0):
        print('Argument error in hex_grid: nx, ny and n are expected to be integers')
        args_are_valid = False

    if (not isinstance(min_diam, (float, int))) or (not isinstance(crop_circ, (float, int))) or (min_diam < 0) or \
            (crop_circ < 0):
        print('Argument error in hex_grid: min_diam and crop_circ are expected to be floats')
        args_are_valid = False

    if (not isinstance(align_to_origin, bool)) or (not isinstance(do_plot, bool)):
        print('Argument error in hex_grid: align_to_origin and do_plot are expected to be booleans')
        args_are_valid = False

    # Single-letter matplotlib color abbreviations.
    VALID_C_ABBR = {'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}
    if (isinstance(face_color, str) and (face_color not in VALID_C_ABBR)) or \
            (isinstance(edge_color, str) and (edge_color not in VALID_C_ABBR)):
        print('Argument error in hex_grid: edge_color and face_color are expected to valid color abbrs, e.g. `k`')
        args_are_valid = False

    def _is_bad_rgb(color):
        # A list color must be RGB or RGBA with every channel in [0, 1].
        return isinstance(color, list) and \
            ((len(color) not in (3, 4)) or any((x < 0) or (x > 1) for x in color))

    if _is_bad_rgb(face_color) or _is_bad_rgb(edge_color):
        print('Argument error in hex_grid: edge_color and face_color are expected to be valid RGB color triplets or '
              'color abbreviations, e.g. [0.1 0.3 0.95] or `k`')
        args_are_valid = False

    if (not isinstance(plotting_gap, float)) or (plotting_gap < 0) or (plotting_gap >= 1):
        print('Argument error in hex_grid: plotting_gap is expected to be a float in range [0, 1)')
        args_are_valid = False

    if not isinstance(rotate_deg, (float, int)):
        # BUGFIX: message previously read "float is expected to be float or integer".
        print('Argument error in hex_grid: rotate_deg is expected to be float or integer')
        args_are_valid = False

    # The grid size must come from either n or from both nx and ny.
    if (n == 0) and ((nx == 0) or (ny == 0)):
        print('Argument error in hex_grid: Expected either n>0 or both [nx.ny]>0')
        args_are_valid = False

    # A non-zero cropping radius smaller than one tile would produce an empty grid.
    if (isinstance(min_diam, (float, int)) and isinstance(crop_circ, (float, int))) and \
            (not np.isclose(crop_circ, 0)) and (crop_circ < min_diam):
        print('Argument error in hex_grid: Cropping radius is expected to be bigger than a single hexagon diameter')
        args_are_valid = False

    if not isinstance(keep_x_sym, bool):
        print('Argument error in hex_grid: keep_x_sym is expected to be boolean')
        args_are_valid = False

    return args_are_valid
def plot_single_lattice(coord_x, coord_y, face_color, edge_color, min_diam, plotting_gap, rotate_deg, h_ax=None):
    """
    Draw one hexagonal lattice on an axes canvas.

    May be called repeatedly to overlay several lattices on the same axes.
    :return: the axes handle the lattice was drawn on
    """
    # Defaults: fully transparent faces, black edges.
    if face_color is None:
        face_color = (1, 1, 1, 0)
    if edge_color is None:
        edge_color = 'k'

    if h_ax is None:
        h_fig = plt.figure(figsize=(5, 5))
        h_ax = h_fig.add_axes([0.05, 0.05, 0.9, 0.9])

    # Circumradius of each hexagon, shrunk by the requested inter-tile gap.
    hex_radius = min_diam / np.sqrt(3) * (1 - plotting_gap)
    tiles = [
        mpatches.RegularPolygon((x, y), numVertices=6,
                                radius=hex_radius,
                                orientation=np.deg2rad(-rotate_deg))
        for x, y in zip(coord_x, coord_y)
    ]
    h_ax.add_collection(PatchCollection(tiles, edgecolor=edge_color, facecolor=face_color))

    h_ax.set_aspect('equal')
    pad = 2 * min_diam
    h_ax.axis([coord_x.min() - pad, coord_x.max() + pad,
               coord_y.min() - pad, coord_y.max() + pad])
    # plt.plot(0, 0, 'r.', markersize=5)  # Add red point at the origin
    return h_ax
def make_grid(nx, ny, min_diam, n, crop_circ, rotate_deg, align_to_origin) -> (np.ndarray, np.ndarray):
    """
    Compute hexagon-center coordinates for the given size, rotation and layout.

    :param nx, ny: grid dimensions in tiles (overridden when n > 0)
    :param min_diam: hexagon diameter used to scale the lattice
    :param n: when positive, replaces (nx, ny) with a near-square layout of ~n tiles
    :param crop_circ: when positive, keep only tiles within this radius of the pivot tile
    :param rotate_deg: clockwise rotation about the pivot tile, in degrees
    :param align_to_origin: shift the lattice so the pivot tile sits at the origin
    :return: (coord_x, coord_y) - two (N, 1) float arrays of tile-center coordinates
    """
    row_pitch = np.sqrt(3) / 2  # vertical spacing between hexagon rows, in diameters

    if n > 0:  # n overrides (nx, ny) in case all three were provided
        ny = int(np.sqrt(n / row_pitch))
        nx = n // ny

    xs, ys = np.meshgrid(np.arange(nx), np.arange(ny), sparse=False, indexing='xy')
    ys = ys * row_pitch
    xs = xs.astype('float')
    xs[1::2, :] += 0.5  # stagger every other row by half a tile
    xs = xs.reshape(-1, 1)
    ys = ys.reshape(-1, 1)

    xs *= min_diam  # scale to requested tile size
    ys = ys.astype('float') * min_diam

    # Pivot: center of a near-middle hexagon, used for crop, rotation and alignment.
    # (np.median() would average the two central values for even-sized arrays.)
    mid_x = ((np.ceil(nx / 2) - 1) + 0.5 * (np.ceil(ny / 2) % 2 == 0)) * min_diam
    mid_y = ((np.ceil(ny / 2) - 1) * row_pitch) * min_diam

    if crop_circ > 0:
        dist = ((xs - mid_x) ** 2 + (ys - mid_y) ** 2) ** 0.5
        keep = dist.flatten() <= crop_circ
        xs = xs[keep, :]
        ys = ys[keep, :]

    if not np.isclose(rotate_deg, 0):  # tolerance-aware zero test for float input
        theta = np.deg2rad(rotate_deg)
        # Clockwise 2D rotation about the pivot point
        rot = np.array([[np.cos(theta), np.sin(theta)],
                        [-np.sin(theta), np.cos(theta)]])
        rotated = np.hstack((xs - mid_x, ys - mid_y)) @ rot.T
        xs, ys = np.hsplit(rotated + np.array([mid_x, mid_y]), 2)

    if align_to_origin:
        xs = xs - mid_x
        ys = ys - mid_y

    return xs, ys
def plot_single_lattice_custom_colors(coord_x, coord_y, face_color, edge_color, min_diam, plotting_gap, rotate_deg,
line_width=1., h_ax=None):
"""
Plot hexagonal lattice where every hexagon is colored by an individual color.
All inputs are similar to the plot_single_lattice() except:
:param line_width:
:param h_ax:
:param rotate_deg:
:param plotting_gap:
:param min_diam:
:param coord_y:
:param coord_x:
:param face_color: numpy array, Nx3 or Nx4 - Color list of length |coord_x| for each hexagon face.
Each row is a RGB or RGBA values, e.g. [0.3 0.3 0.3 1]
:param edge_color: numpy array, Nx3 or Nx4 - Color list of length |coord_x| for each hexagon edge.
Each row is a RGB or RGBA values, e.g. [0.3 0.3 0.3 1]
:return:
"""
if h_ax is None:
h_fig = plt.figure(figsize=(5, 5))
h_ax = h_fig.add_axes([0.05, 0.05, 0.9, 0.9])
for i, (curr_x, curr_y) in enumerate(zip(coord_x, coord_y)):
polygon = mpatches.RegularPolygon((curr_x, curr_y), numVertices=6,
radius=min_diam / np.sqrt(3) * (1 - plotting_gap),
orientation=np.deg2rad(-rotate_deg),
edgecolor=edge_color[i],
facecolor=face_color[i], linewidth=line_width)
h_ax.add_artist(polygon)
h_ax.set_aspect('equal')
h_ax.axis([coord_x.min() - 2 * min_diam, coord_x.max() + 2 * min_diam, coord_y.min() - 2 * min_diam,
coord_y.max() + 2 * min_diam])
# plt.plot(0, 0, 'r.', markersize=5) # Add | |
other : openmc.Tally or float
The tally or scalar value to divide this tally by
Returns
-------
openmc.Tally
A new derived tally which is the dividend of this tally and the
other tally or scalar value in the division.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='/')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean / other
new_tally._std_dev = self.std_dev * np.abs(1. / other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to divide Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __div__(self, other):
    """Python 2 division operator -- delegates to :meth:`__truediv__`."""
    return self.__truediv__(other)
def __pow__(self, power):
    """Raises this tally to another tally or scalar value power.

    This method builds a new tally with data that is the power of
    this tally's data to that from the other tally or scalar value. If the
    filters, scores and nuclides in the two tallies are not the same, then
    they are combined in all possible ways in the new derived tally.

    Uncertainty propagation is used to compute the standard deviation
    for the new tally's data. It is important to note that this makes
    the assumption that the tally data is independently distributed.
    In most use cases, this is *not* true and may lead to under-prediction
    of the uncertainty. The uncertainty propagation model is from the
    following source:
    https://en.wikipedia.org/wiki/Propagation_of_uncertainty

    Parameters
    ----------
    power : openmc.Tally or float
        The tally or scalar value exponent

    Returns
    -------
    openmc.Tally
        A new derived tally which is this tally raised to the power of the
        other tally or scalar value in the exponentiation.

    Raises
    ------
    ValueError
        When this method is called before the Tally is populated with data,
        or when ``power`` is neither a Tally nor a real number.
    """
    # Check that results have been read
    if not self.derived and self.sum is None:
        msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
              'since it does not contain any results.'.format(self.id)
        raise ValueError(msg)

    if isinstance(power, Tally):
        new_tally = self.hybrid_product(power, binary_op='^')

        # If original tally operand was sparse, sparsify the new tally
        if self.sparse:
            new_tally.sparse = True

    elif isinstance(power, Real):
        new_tally = Tally(name='derived')
        new_tally._derived = True
        new_tally.name = self.name

        # BUGFIX: use the public `mean` property instead of the private
        # `_mean` attribute so lazily-computed results are materialized
        # before exponentiation -- consistent with __truediv__ et al.
        new_tally._mean = self.mean ** power
        self_rel_err = self.std_dev / self.mean
        # First-order uncertainty propagation: sigma(x**p) ~= |x**p * p * sigma_x/x|
        new_tally._std_dev = np.abs(new_tally._mean * power * self_rel_err)
        new_tally.estimator = self.estimator
        new_tally.with_summary = self.with_summary
        new_tally.num_realizations = self.num_realizations

        new_tally.filters = copy.deepcopy(self.filters)
        new_tally.nuclides = copy.deepcopy(self.nuclides)
        new_tally.scores = copy.deepcopy(self.scores)

        # If original tally was sparse, sparsify the exponentiated tally
        new_tally.sparse = self.sparse

    else:
        msg = 'Unable to raise Tally ID="{}" to power "{}"'.format(self.id, power)
        raise ValueError(msg)

    return new_tally
def __radd__(self, other):
    """Right addition with a scalar value.

    Addition is commutative here, so this simply reuses :meth:`__add__`.

    Parameters
    ----------
    other : float
        The scalar value to add to this tally

    Returns
    -------
    openmc.Tally
        A new derived tally of this tally added with the scalar value.
    """
    return self.__add__(other)
def __rsub__(self, other):
    """Right subtraction from a scalar value.

    Computes ``other - self`` as ``(-self) + other`` using the existing
    multiplication and addition operators.

    Parameters
    ----------
    other : float
        The scalar value to subtract this tally from

    Returns
    -------
    openmc.Tally
        A new derived tally of this tally subtracted from the scalar value.
    """
    return (self * -1.) + other
def __rmul__(self, other):
    """Right multiplication with a scalar value.

    Multiplication is commutative here, so this simply reuses :meth:`__mul__`.

    Parameters
    ----------
    other : float
        The scalar value to multiply with this tally

    Returns
    -------
    openmc.Tally
        A new derived tally of this tally multiplied by the scalar value.
    """
    return self.__mul__(other)
def __rdiv__(self, other):
    """Right division with a scalar value.

    Computes ``other / self`` as the reciprocal tally scaled by ``other``.

    Parameters
    ----------
    other : float
        The scalar value to divide by this tally

    Returns
    -------
    openmc.Tally
        A new derived tally of the scalar value divided by this tally.
    """
    return (self ** -1) * other
def __abs__(self):
    """The absolute value of this tally.

    Returns
    -------
    openmc.Tally
        A new derived tally which is the absolute value of this tally.
    """
    result = copy.deepcopy(self)
    result._mean = np.abs(result.mean)
    return result
def __neg__(self):
    """The negated value of this tally.

    Returns
    -------
    openmc.Tally
        A new derived tally which is the negated value of this tally.
    """
    return self * -1
def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[],
squeeze=False):
"""Build a sliced tally for the specified filters, scores and nuclides.
This method constructs a new tally to encapsulate a subset of the data
represented by this tally. The subset of data to include in the tally
slice is determined by the scores, filters and nuclides specified in
the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings (e.g., ['absorption',
'nu-fission']
filters : Iterable of openmc.FilterMeta
An iterable of filter types (e.g., [MeshFilter, EnergyFilter])
filter_bins : list of Iterables
A list of iterables of filter bins corresponding to the specified
filter types (e.g., [(1,), ((0., 0.625e-6),)]). Each iterable
contains bins to slice for the corresponding filter type in the
filters parameter. Each bin is the integer ID for 'material',
'surface', 'cell', 'cellborn', and 'universe' Filters. Each bin is
an integer for the cell instance ID for 'distribcell' Filters. Each
bin is a 2-tuple of floats for 'energy' and 'energyout' filters
corresponding to the energy boundaries of the bin of interest. The
bin is an (x,y,z) 3-tuple for 'mesh' filters corresponding to the
mesh cell of interest. The order of the bins in the list must
correspond to the `filters` argument.
nuclides : list of str
A list of nuclide name strings (e.g., ['U235', 'U238'])
squeeze : bool
Whether to remove filters with only a single bin in the sliced tally
Returns
-------
openmc.Tally
A new tally which encapsulates the subset of data requested in the
order each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Ensure that the tally has data
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Create deep copy of tally to return as sliced tally
new_tally = copy.deepcopy(self)
new_tally._derived = True
# Differentiate Tally with a new auto-generated Tally ID
new_tally.id = None
new_tally.sparse = False
if not self.derived and self.sum is not None:
new_sum = self.get_values(scores, filters, filter_bins,
nuclides, 'sum')
new_tally.sum = new_sum
if not self.derived and self.sum_sq is not None:
new_sum_sq = self.get_values(scores, filters, filter_bins,
nuclides, 'sum_sq')
new_tally.sum_sq = new_sum_sq
if self.mean is not None:
new_mean = self.get_values(scores, filters, filter_bins,
nuclides, 'mean')
new_tally._mean = new_mean
if self.std_dev is not None:
new_std_dev = self.get_values(scores, filters, filter_bins,
nuclides, 'std_dev')
new_tally._std_dev = new_std_dev
# SCORES
if scores:
score_indices = []
# Determine the score indices from any of the requested scores
for score in self.scores:
if score not in scores:
score_index = self.get_score_index(score)
score_indices.append(score_index)
# Loop over indices in reverse to remove excluded scores
for score_index in reversed(score_indices):
new_tally.remove_score(self.scores[score_index])
# NUCLIDES
if nuclides:
nuclide_indices = []
# Determine the nuclide indices from any of the requested nuclides
for nuclide in self.nuclides:
if nuclide.name not in nuclides:
nuclide_index = self.get_nuclide_index(nuclide.name)
nuclide_indices.append(nuclide_index)
# Loop over indices in reverse to remove excluded Nuclides
for nuclide_index in reversed(nuclide_indices):
new_tally.remove_nuclide(self.nuclides[nuclide_index])
# FILTERS
if filters:
# Determine the filter | |
>>>
>>> client = datalabeling_v1beta1.DataLabelingServiceClient()
>>>
>>> name = client.data_item_path('[PROJECT]', '[DATASET]', '[DATA_ITEM]')
>>>
>>> response = client.get_data_item(name)
Args:
name (str): Required. The name of the data item to get, format:
projects/{project\_id}/datasets/{dataset\_id}/dataItems/{data\_item\_id}
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datalabeling_v1beta1.types.DataItem` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_data_item" not in self._inner_api_calls:
self._inner_api_calls[
"get_data_item"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_data_item,
default_retry=self._method_configs["GetDataItem"].retry,
default_timeout=self._method_configs["GetDataItem"].timeout,
client_info=self._client_info,
)
request = data_labeling_service_pb2.GetDataItemRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_data_item"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_data_items(
    self,
    parent,
    filter_=None,
    page_size=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Lists data items in a dataset. This API can be called after data
    are imported into dataset. Pagination is supported.

    Example:
        >>> from google.cloud import datalabeling_v1beta1
        >>> client = datalabeling_v1beta1.DataLabelingServiceClient()
        >>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
        >>> for element in client.list_data_items(parent):
        ...     pass  # process element
        >>> # Or page by page:
        >>> for page in client.list_data_items(parent).pages:
        ...     for element in page:
        ...         pass  # process element

    Args:
        parent (str): Required. Name of the dataset to list data items, format:
            projects/{project\_id}/datasets/{dataset\_id}
        filter_ (str): Optional. Filter is not supported at this moment.
        page_size (int): The maximum number of resources contained in each
            underlying API response page.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` means the default configuration.
        timeout (Optional[float]): Timeout in seconds, applied per attempt
            when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.api_core.page_iterator.PageIterator` instance,
        iterable of :class:`~google.cloud.datalabeling_v1beta1.types.DataItem`
        instances; its ``pages`` property iterates page-by-page.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed.
        google.api_core.exceptions.RetryError: If retry attempts were exhausted.
        ValueError: If the parameters are invalid.
    """
    method_name = "list_data_items"
    # Lazily wrap the raw transport method with retry/timeout logic and cache it.
    if method_name not in self._inner_api_calls:
        self._inner_api_calls[method_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.list_data_items,
            default_retry=self._method_configs["ListDataItems"].retry,
            default_timeout=self._method_configs["ListDataItems"].timeout,
            client_info=self._client_info,
        )

    request = data_labeling_service_pb2.ListDataItemsRequest(
        parent=parent, filter=filter_, page_size=page_size
    )

    # Attach the request-routing header expected by the backend.
    all_metadata = list(metadata) if metadata is not None else []
    try:
        header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        all_metadata.append(
            google.api_core.gapic_v1.routing_header.to_grpc_metadata(header)
        )

    call = functools.partial(
        self._inner_api_calls[method_name],
        retry=retry,
        timeout=timeout,
        metadata=all_metadata,
    )
    return google.api_core.page_iterator.GRPCIterator(
        client=None,
        method=call,
        request=request,
        items_field="data_items",
        request_token_field="page_token",
        response_token_field="next_page_token",
    )
def get_annotated_dataset(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Gets an annotated dataset by resource name.

    Example:
        >>> from google.cloud import datalabeling_v1beta1
        >>> client = datalabeling_v1beta1.DataLabelingServiceClient()
        >>> name = client.annotated_dataset_path('[PROJECT]', '[DATASET]', '[ANNOTATED_DATASET]')
        >>> response = client.get_annotated_dataset(name)

    Args:
        name (str): Required. Name of the annotated dataset to get, format:
            projects/{project\_id}/datasets/{dataset\_id}/annotatedDatasets/
            {annotated\_dataset\_id}
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` means the default configuration.
        timeout (Optional[float]): Timeout in seconds, applied per attempt
            when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.cloud.datalabeling_v1beta1.types.AnnotatedDataset` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed.
        google.api_core.exceptions.RetryError: If retry attempts were exhausted.
        ValueError: If the parameters are invalid.
    """
    method_name = "get_annotated_dataset"
    # Lazily wrap the raw transport method with retry/timeout logic and cache it.
    if method_name not in self._inner_api_calls:
        self._inner_api_calls[method_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_annotated_dataset,
            default_retry=self._method_configs["GetAnnotatedDataset"].retry,
            default_timeout=self._method_configs["GetAnnotatedDataset"].timeout,
            client_info=self._client_info,
        )

    request = data_labeling_service_pb2.GetAnnotatedDatasetRequest(name=name)

    # Attach the request-routing header expected by the backend.
    all_metadata = list(metadata) if metadata is not None else []
    try:
        header = [("name", name)]
    except AttributeError:
        pass
    else:
        all_metadata.append(
            google.api_core.gapic_v1.routing_header.to_grpc_metadata(header)
        )

    return self._inner_api_calls[method_name](
        request, retry=retry, timeout=timeout, metadata=all_metadata
    )
def list_annotated_datasets(
    self,
    parent,
    filter_=None,
    page_size=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Lists annotated datasets for a dataset. Pagination is supported.

    Example:
        >>> from google.cloud import datalabeling_v1beta1
        >>> client = datalabeling_v1beta1.DataLabelingServiceClient()
        >>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
        >>> for element in client.list_annotated_datasets(parent):
        ...     pass  # process element
        >>> # Or page by page:
        >>> for page in client.list_annotated_datasets(parent).pages:
        ...     for element in page:
        ...         pass  # process element

    Args:
        parent (str): Required. Name of the dataset to list annotated datasets, format:
            projects/{project\_id}/datasets/{dataset\_id}
        filter_ (str): Optional. Filter is not supported at this moment.
        page_size (int): The maximum number of resources contained in each
            underlying API response page.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` means the default configuration.
        timeout (Optional[float]): Timeout in seconds, applied per attempt
            when ``retry`` is specified.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.api_core.page_iterator.PageIterator` instance,
        iterable of :class:`~google.cloud.datalabeling_v1beta1.types.AnnotatedDataset`
        instances; its ``pages`` property iterates page-by-page.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed.
        google.api_core.exceptions.RetryError: If retry attempts were exhausted.
        ValueError: If the parameters are invalid.
    """
    method_name = "list_annotated_datasets"
    # Lazily wrap the raw transport method with retry/timeout logic and cache it.
    if method_name not in self._inner_api_calls:
        self._inner_api_calls[method_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.list_annotated_datasets,
            default_retry=self._method_configs["ListAnnotatedDatasets"].retry,
            default_timeout=self._method_configs["ListAnnotatedDatasets"].timeout,
            client_info=self._client_info,
        )

    request = data_labeling_service_pb2.ListAnnotatedDatasetsRequest(
        parent=parent, filter=filter_, page_size=page_size
    )

    # Attach the request-routing header expected by the backend.
    all_metadata = list(metadata) if metadata is not None else []
    try:
        header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        all_metadata.append(
            google.api_core.gapic_v1.routing_header.to_grpc_metadata(header)
        )

    call = functools.partial(
        self._inner_api_calls[method_name],
        retry=retry,
        timeout=timeout,
        metadata=all_metadata,
    )
    return google.api_core.page_iterator.GRPCIterator(
        client=None,
        method=call,
        request=request,
        items_field="annotated_datasets",
        request_token_field="page_token",
        response_token_field="next_page_token",
    )
def label_image(
self,
parent,
basic_config,
feature,
image_classification_config=None,
bounding_poly_config=None,
polyline_config=None,
segmentation_config=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
Example:
>>> from google.cloud import datalabeling_v1beta1
>>> from google.cloud.datalabeling_v1beta1 import enums
>>>
>>> client = datalabeling_v1beta1.DataLabelingServiceClient()
>>>
>>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
>>>
>>> # TODO: Initialize `basic_config`:
>>> basic_config = {}
>>>
>>> # TODO: Initialize `feature`:
>>> feature = enums.LabelImageRequest.Feature.FEATURE_UNSPECIFIED
>>>
>>> response = client.label_image(parent, basic_config, feature)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Name of the dataset to request labeling task, format:
projects/{project\_id}/datasets/{dataset\_id}
basic_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig]): Required. Basic human annotation config.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig`
feature (~google.cloud.datalabeling_v1beta1.types.Feature): Required. The type of image labeling task.
image_classification_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig]): Configuration for image classification task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config are required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig`
| |
<filename>docker/dockerfile/xfdsend/Dscapy/layers/l2.py
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) <NAME> <<EMAIL>>
## This program is published under a GPLv2 license
import os,struct,time
from scapy.base_classes import Net
from scapy.config import conf
from scapy.packet import *
from scapy.ansmachine import *
from scapy.plist import SndRcvList
from scapy.fields import *
from scapy.sendrecv import srp,srp1
from scapy.arch import get_if_hwaddr
#################
## Tools ##
#################
class Neighbor:
    """Registry mapping (layer-2 class, layer-3 class) pairs to the
    callback that resolves the L2 destination address for them
    (e.g. an ARP lookup for Ether/IP)."""
    def __init__(self):
        # (l2_class, l3_class) -> callable(l2_instance, l3_instance)
        self.resolvers = {}
    def register_l3(self, l2, l3, resolve_method):
        """Register *resolve_method* for the (l2, l3) class pair."""
        self.resolvers[(l2, l3)] = resolve_method
    def resolve(self, l2inst, l3inst):
        """Invoke the resolver registered for these instances' classes.

        Returns the resolver's result, or None when no resolver is
        registered for this pair."""
        key = (l2inst.__class__, l3inst.__class__)
        resolver = self.resolvers.get(key)
        if resolver is not None:
            return resolver(l2inst, l3inst)
    def __repr__(self):
        rows = ["%-15s -> %-15s" % (l2.__name__, l3.__name__)
                for l2, l3 in self.resolvers]
        return "\n".join(rows)
# Global neighbor-resolution registry plus an ARP cache with expiring
# entries, both hung off the scapy runtime configuration object.
conf.neighbor = Neighbor()
conf.netcache.new_cache("arp_cache", 120) # cache entries expire after 120s
@conf.commands.register
def getmacbyip(ip, chainCC=0):
    """Return MAC address corresponding to a given IP address"""
    # A Net object stands for a range: use its first address
    # (Python 2 iterator protocol: .next()).
    if isinstance(ip,Net):
        ip = iter(ip).next()
    # Python 2: map() returns a subscriptable list of the 4 octets.
    tmp = map(ord, inet_aton(ip))
    # 224.0.0.0/4 multicast maps algorithmically to an Ethernet
    # multicast MAC (RFC 1112) -- no ARP exchange needed.
    if (tmp[0] & 0xf0) == 0xe0: # mcast @
        return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1]&0x7f,tmp[2],tmp[3])
    iff,a,gw = conf.route.route(ip)
    # Loopback or the subnet broadcast address: use Ethernet broadcast.
    if ( (iff == "lo") or (ip == conf.route.get_if_bcast(iff)) ):
        return "ff:ff:ff:ff:ff:ff"
    # Off-link destination: resolve the gateway's MAC instead.
    if gw != "0.0.0.0":
        ip = gw
    # Serve from the expiring ARP cache when possible.
    mac = conf.netcache.arp_cache.get(ip)
    if mac:
        return mac
    # Cache miss: broadcast a who-has ARP request, wait up to 2 seconds.
    res = srp1(Ether(dst=ETHER_BROADCAST)/ARP(op="who-has", pdst=ip),
               type=ETH_P_ARP,
               iface = iff,
               timeout=2,
               verbose=0,
               chainCC=chainCC,
               nofilter=1)
    if res is not None:
        mac = res.payload.hwsrc
        conf.netcache.arp_cache[ip] = mac
        return mac
    # No reply: resolution failed.
    return None
### Fields
class DestMACField(MACField):
    """Destination-MAC field whose default value is resolved from the
    packet's payload (e.g. via ARP), falling back to broadcast."""
    def __init__(self, name):
        # A default of None triggers resolution in i2h().
        MACField.__init__(self, name, None)
    def i2h(self, pkt, x):
        if x is not None:
            return MACField.i2h(self, pkt, x)
        resolved = conf.neighbor.resolve(pkt, pkt.payload)
        if resolved is None:
            resolved = "ff:ff:ff:ff:ff:ff"
            warning("Mac address to reach destination not found. Using broadcast.")
        return MACField.i2h(self, pkt, resolved)
    def i2m(self, pkt, x):
        # Resolve first, then convert to machine representation.
        return MACField.i2m(self, pkt, self.i2h(pkt, x))
class SourceMACField(MACField):
    """Source-MAC field defaulting to the hardware address of the
    interface the packet's payload routes through; all-zero when no
    interface or address can be determined."""
    def __init__(self, name):
        MACField.__init__(self, name, None)
    def i2h(self, pkt, x):
        if x is None:
            iff,a,gw = pkt.payload.route()
            if iff:
                try:
                    x = get_if_hwaddr(iff)
                except Exception:
                    # Narrowed from a bare except: (which also swallowed
                    # KeyboardInterrupt/SystemExit). Interfaces without a
                    # hardware address fall through to the default below.
                    pass
            if x is None:
                x = "00:00:00:00:00:00"
        return MACField.i2h(self, pkt, x)
    def i2m(self, pkt, x):
        return MACField.i2m(self, pkt, self.i2h(pkt, x))
class ARPSourceMACField(MACField):
    """Like SourceMACField, but routes via the ARP packet itself
    (pkt.route()) rather than via its payload."""
    def __init__(self, name):
        MACField.__init__(self, name, None)
    def i2h(self, pkt, x):
        if x is None:
            iff,a,gw = pkt.route()
            if iff:
                try:
                    x = get_if_hwaddr(iff)
                except Exception:
                    # Narrowed from a bare except: (which also swallowed
                    # KeyboardInterrupt/SystemExit). Fall through to the
                    # all-zero default below.
                    pass
            if x is None:
                x = "00:00:00:00:00:00"
        return MACField.i2h(self, pkt, x)
    def i2m(self, pkt, x):
        return MACField.i2m(self, pkt, self.i2h(pkt, x))
### Layers
class Ether(Packet):
    """Ethernet II frame header: destination/source MAC plus EtherType."""
    name = "Ethernet"
    fields_desc = [ DestMACField("dst"),
                    SourceMACField("src"),
                    XShortEnumField("type", 0x0000, ETHER_TYPES) ]
    def hashret(self):
        # Match candidate answers on EtherType + the payload's own hash.
        # NOTE(review): "H" packs in native byte order, not network order.
        return struct.pack("H",self.type)+self.payload.hashret()
    def answers(self, other):
        # A frame answers another only if EtherTypes match and the
        # payloads answer each other.
        if isinstance(other,Ether):
            if self.type == other.type:
                return self.payload.answers(other.payload)
        return 0
    def mysummary(self):
        return self.sprintf("%src% > %dst% (%type%)")
    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # A type/length value <= 1500 is an 802.3 length field, so such
        # frames are dissected as Dot3 instead of Ethernet II.
        if _pkt and len(_pkt) >= 14:
            if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
                return Dot3
        return cls
class Dot3TagNoLen(Packet):
    """VLAN tag fields (priority/CFI/vlan id) without the trailing
    length field; companion of Dot3Tag below."""
    name = "802.3Tag,no length segment."
    fields_desc = [ BitField("prio", 0, 3),
                    BitField("id", 0, 1),
                    BitField("vlan", 1, 12), ]
class Dot3(Packet):
    """IEEE 802.3 frame header: MACs plus a length field (instead of
    an EtherType as in Ethernet II)."""
    name = "802.3"
    fields_desc = [ DestMACField("dst"),
                    MACField("src", ETHER_ANY),
                    LenField("len", None, "H") ]
    def extract_padding(self,s):
        # Bytes beyond the declared length are padding/trailer.
        l = self.len
        return s[:l],s[l:]
    def answers(self, other):
        if isinstance(other,Dot3):
            return self.payload.answers(other.payload)
        return 0
    def mysummary(self):
        return "802.3 %s > %s" % (self.src, self.dst)
    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # A value > 1500 in the length slot is an EtherType, so the
        # frame is actually Ethernet II.
        if _pkt and len(_pkt) >= 14:
            if struct.unpack("!H", _pkt[12:14])[0] > 1500:
                return Ether
        return cls
class Dot3Tag(Packet):
    """VLAN tag (priority/CFI/vlan id) followed by an 802.3 length
    field."""
    name = "802.3Tag"
    fields_desc = [ BitField("prio", 0, 3),
                    BitField("id", 0, 1),
                    BitField("vlan", 1, 12),
                    LenField("len", None, "H") ]
class LLC(Packet):
    """IEEE 802.2 Logical Link Control header (DSAP/SSAP/control)."""
    name = "LLC"
    fields_desc = [ XByteField("dsap", 0x00),
                    XByteField("ssap", 0x00),
                    ByteField("ctrl", 0) ]
# LLC carries no addressing of its own: delegate neighbor (L2 address)
# resolution to its payload, for both Ether and Dot3 underlayers.
conf.neighbor.register_l3(Ether, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
conf.neighbor.register_l3(Dot3, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class CookedLinux(Packet):
    """Linux "cooked" capture (SLL) pseudo-header, registered below for
    DLT 113 (and 144)."""
    name = "cooked linux"
    fields_desc = [ ShortEnumField("pkttype",0, {0: "unicast",
                                                 4:"sent-by-us"}), #XXX incomplete
                    XShortField("lladdrtype",512),
                    ShortField("lladdrlen",0),
                    StrFixedLenField("src","",8),
                    XShortEnumField("proto",0x800,ETHER_TYPES) ]
class SNAP(Packet):
    """802.2 SNAP extension header: OUI plus an EtherType-style
    protocol code."""
    name = "SNAP"
    fields_desc = [ X3BytesField("OUI",0x000000),
                    XShortEnumField("code", 0x000, ETHER_TYPES) ]
# SNAP likewise delegates neighbor resolution to its payload.
conf.neighbor.register_l3(Dot3, SNAP, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class Dot1Q(Packet):
    """IEEE 802.1Q VLAN tag: priority/CFI/vlan id plus the encapsulated
    EtherType (or 802.3 length)."""
    name = "802.1Q"
    aliastypes = [ Ether ]
    fields_desc = [ BitField("prio", 0, 3),
                    BitField("id", 0, 1),
                    BitField("vlan", 1, 12),
                    XShortEnumField("type", 0x0000, ETHER_TYPES) ]
    def answers(self, other):
        # Tagged vs tagged: require matching type and vlan, then defer
        # to the payloads. Tagged vs untagged: compare our payload
        # directly against the other packet.
        if isinstance(other,Dot1Q):
            if ( (self.type == other.type) and
                 (self.vlan == other.vlan) ):
                return self.payload.answers(other.payload)
        else:
            return self.payload.answers(other)
        return 0
    def default_payload_class(self, pay):
        # A type value <= 1500 is an 802.3 length: payload is LLC.
        if self.type <= 1500:
            return LLC
        return Raw
    def extract_padding(self,s):
        # With an 802.3 length, bytes beyond it are padding.
        if self.type <= 1500:
            return s[:self.type],s[self.type:]
        return s,None
    def mysummary(self):
        if isinstance(self.underlayer, Ether):
            return self.underlayer.sprintf("802.1q %Ether.src% > %Ether.dst% (%Dot1Q.type%) vlan %Dot1Q.vlan%")
        else:
            return self.sprintf("802.1q (%Dot1Q.type%) vlan %Dot1Q.vlan%")
# A VLAN tag delegates neighbor resolution to the encapsulated payload.
conf.neighbor.register_l3(Ether, Dot1Q, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class STP(Packet):
    """Spanning Tree Protocol BPDU (802.1D); the four timers are
    encoded with BCDFloatField."""
    name = "Spanning Tree Protocol"
    fields_desc = [ ShortField("proto", 0),
                    ByteField("version", 0),
                    ByteField("bpdutype", 0),
                    ByteField("bpduflags", 0),
                    ShortField("rootid", 0),
                    MACField("rootmac", ETHER_ANY),
                    IntField("pathcost", 0),
                    ShortField("bridgeid", 0),
                    MACField("bridgemac", ETHER_ANY),
                    ShortField("portid", 0),
                    BCDFloatField("age", 1),
                    BCDFloatField("maxage", 20),
                    BCDFloatField("hellotime", 2),
                    BCDFloatField("fwddelay", 15) ]
#added by zhangpengi
class MSTP(Packet):
    """Multiple Spanning Tree Protocol BPDU: the 802.1D fields plus the
    version-1 and version-3 length fields of 802.1s framing."""
    name = "Multiple Spanning Tree Protocol"
    fields_desc = [ ShortField("proto", 0),
                    ByteField("version", 0),
                    ByteField("bpdutype", 0),
                    ByteField("bpduflags", 0),
                    ShortField("rootid", 0),
                    MACField("rootmac", ETHER_ANY),
                    IntField("pathcost", 0),
                    ShortField("bridgeid", 0),
                    MACField("bridgemac", ETHER_ANY),
                    ShortField("portid", 0),
                    BCDFloatField("age", 1),
                    BCDFloatField("maxage", 20),
                    BCDFloatField("hellotime", 2),
                    BCDFloatField("fwddelay", 15),
                    ByteField("version1len", 0),
                    ShortField("version3len", 0),
                    ]
#added by zhangpengi
class MSTPExt(Packet):
    """MSTP extension block (MST configuration identifier + CIST info).
    NOTE(review): relies on StrChangLenField, a custom field defined
    elsewhere in this project -- verify its length semantics there.
    The digest defaults presumably encode a standard MD5 configuration
    digest split into two 64-bit halves -- confirm."""
    name = "Multiple Spanning Tree Protocol Extension"
    fields_desc = [ ByteField("id", 116),
                    StrChangLenField("names", None),
                    XLongField("digesth", 0xB41829F9030a054F),
                    XLongField("digestl", 0xB74EF7A8587FF58D),
                    ShortField("bridgeid", 32768),
                    MACField("bridgemac", "00:03:0f:00:00:00"),
                    IntField("pathcost", 1),
                    ByteField("hops", 20),
                    ]
#added by zhangpengi
class MSTID(Packet):
    """Per-instance MSTI configuration record within an MSTP BPDU."""
    name = "Multiple Spanning Tree ID"
    fields_desc = [ ShortField("id", 1),
                    ByteField("flags", 0),
                    ShortField("rootid", 0),
                    MACField("rootmac", "00:00:00:00:00:01"),
                    ShortField("bridgeid", 0),
                    MACField("bridgemac", "00:00:00:01:00:00"),
                    ShortField("portid", 0),
                    ByteField("hops", 2),
                    ]
class EAPOL(Packet):
    """EAP over LAN (802.1X) header."""
    name = "EAPOL"
    fields_desc = [ ByteField("version", 1),
                    ByteEnumField("type", 0, ["EAP_PACKET", "START", "LOGOFF", "KEY", "ASF"]),
                    LenField("len", None, "H") ]
    # Packet-type constants mirroring the ByteEnumField above.
    EAP_PACKET= 0
    START = 1
    LOGOFF = 2
    KEY = 3
    ASF = 4
    def extract_padding(self, s):
        # Payload length comes from the header's len field; the rest is
        # padding.
        l = self.len
        return s[:l],s[l:]
    def hashret(self):
        # Python 2 idiom: chr() turns the type byte into a str byte.
        return chr(self.type)+self.payload.hashret()
    def answers(self, other):
        # Only EAP_PACKET frames carry payloads that can answer each
        # other.
        if isinstance(other,EAPOL):
            if ( (self.type == self.EAP_PACKET) and
                 (other.type == self.EAP_PACKET) ):
                return self.payload.answers(other.payload)
        return 0
    def mysummary(self):
        return self.sprintf("EAPOL %EAPOL.type%")
class EAP(Packet):
    """Extensible Authentication Protocol header; the type octet is
    only present for REQUEST/RESPONSE codes."""
    name = "EAP"
    fields_desc = [ ByteEnumField("code", 4, {1:"REQUEST",2:"RESPONSE",3:"SUCCESS",4:"FAILURE"}),
                    ByteField("id", 0),
                    ShortField("len",None),
                    ConditionalField(ByteEnumField("type",0, {1:"ID",4:"MD5"}), lambda pkt:pkt.code not in [EAP.SUCCESS, EAP.FAILURE])
                    ]
    # Code and type constants mirroring the enums above.
    REQUEST = 1
    RESPONSE = 2
    SUCCESS = 3
    FAILURE = 4
    TYPE_ID = 1
    TYPE_MD5 = 4
    def answers(self, other):
        # A RESPONSE answers a REQUEST of the same type; it also
        # matches another RESPONSE (retransmission pairing).
        if isinstance(other,EAP):
            if self.code == self.REQUEST:
                return 0
            elif self.code == self.RESPONSE:
                if ( (other.code == self.REQUEST) and
                     (other.type == self.type) ):
                    return 1
                elif other.code == self.RESPONSE:
                    return 1
        return 0
    def post_build(self, p, pay):
        # Fill the 16-bit length (header + payload) when left as None.
        # Python 2 idiom: chr() splicing into the byte string.
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2]+chr((l>>8)&0xff)+chr(l&0xff)+p[4:]
        return p+pay
class ARP(Packet):
    """Address Resolution Protocol header (IPv4 over Ethernet
    defaults)."""
    name = "ARP"
    fields_desc = [ XShortField("hwtype", 0x0001),
                    XShortEnumField("ptype", 0x0800, ETHER_TYPES),
                    ByteField("hwlen", 6),
                    ByteField("plen", 4),
                    ShortEnumField("op", 1, {"who-has":1, "is-at":2, "RARP-req":3, "RARP-rep":4, "Dyn-RARP-req":5, "Dyn-RAR-rep":6, "Dyn-RARP-err":7, "InARP-req":8, "InARP-rep":9}),
                    ARPSourceMACField("hwsrc"),
                    SourceIPField("psrc","pdst"),
                    MACField("hwdst", ETHER_ANY),
                    IPField("pdst", "0.0.0.0") ]
    # Opcode constants for the two common operations.
    who_has = 1
    is_at = 2
    def answers(self, other):
        # An is-at reply answers a who-has request about the same
        # protocol address.
        if isinstance(other,ARP):
            if ( (self.op == self.is_at) and
                 (other.op == self.who_has) and
                 (self.psrc == other.pdst) ):
                return 1
        return 0
    def route(self):
        # Route on the protocol destination; a Gen (range) contributes
        # its first address (Python 2 .next()).
        dst = self.pdst
        if isinstance(dst,Gen):
            dst = iter(dst).next()
        return conf.route.route(dst)
    def extract_padding(self, s):
        # ARP carries no payload: everything after the header is
        # padding.
        return "",s
    def mysummary(self):
        if self.op == self.is_at:
            return self.sprintf("ARP is at %hwsrc% says %psrc%")
        elif self.op == self.who_has:
            return self.sprintf("ARP who has %pdst% says %psrc%")
        else:
            return self.sprintf("ARP %op% %psrc% > %pdst%")
# Destination MAC for an Ether/ARP frame is resolved by ARP on pdst.
conf.neighbor.register_l3(Ether, ARP, lambda l2,l3: getmacbyip(l3.pdst))
class GRE(Packet):
    """Generic Routing Encapsulation header (optional checksum only)."""
    name = "GRE"
    fields_desc = [ BitField("chksumpresent",0,1),
                    BitField("reserved0",0,12),
                    BitField("version",0,3),
                    XShortEnumField("proto", 0x0000, ETHER_TYPES),
                    ConditionalField(XShortField("chksum",None),lambda pkt:pkt.chksumpresent==1),
                    ConditionalField(XShortField("reserved1",None),lambda pkt:pkt.chksumpresent==1),
                    ]
    def post_build(self, p, pay):
        p += pay
        # Compute the checksum over the whole packet when requested and
        # not explicitly supplied (Python 2 chr() splicing).
        if self.chksumpresent and self.chksum is None:
            c = checksum(p)
            p = p[:4]+chr((c>>8)&0xff)+chr(c&0xff)+p[6:]
        return p
# Layer-to-layer bindings: which class dissects the payload for a given
# discriminator value. Values are decimal EtherTypes/protos:
# 2054 = 0x0806 (ARP), 33024 = 0x8100 (802.1Q), 34958 = 0x888E (EAPOL),
# 1 = 0x0001, 122 = 0x007A.
bind_layers( Dot3, LLC, )
bind_layers( Ether, LLC, type=122)
bind_layers( Ether, Dot1Q, type=33024)
bind_layers( Ether, Ether, type=1)
bind_layers( Ether, ARP, type=2054)
bind_layers( Ether, EAPOL, type=34958)
bind_layers( Ether, EAPOL, dst='01:80:c2:00:00:03', type=34958)
bind_layers( CookedLinux, LLC, proto=122)
bind_layers( CookedLinux, Dot1Q, proto=33024)
bind_layers( CookedLinux, Ether, proto=1)
bind_layers( CookedLinux, ARP, proto=2054)
bind_layers( CookedLinux, EAPOL, proto=34958)
bind_layers( GRE, LLC, proto=122)
bind_layers( GRE, Dot1Q, proto=33024)
bind_layers( GRE, Ether, proto=1)
bind_layers( GRE, ARP, proto=2054)
bind_layers( GRE, EAPOL, proto=34958)
bind_layers( EAPOL, EAP, type=0)
# LLC SAP values: 66 = 0x42 (STP), 170 = 0xAA (SNAP).
bind_layers( LLC, STP, dsap=66, ssap=66, ctrl=3)
bind_layers( LLC, SNAP, dsap=170, ssap=170, ctrl=3)
bind_layers( SNAP, Dot1Q, code=33024)
bind_layers( SNAP, Ether, code=1)
bind_layers( SNAP, ARP, code=2054)
bind_layers( SNAP, EAPOL, code=34958)
bind_layers( SNAP, STP, code=267)
# Register link-layer (DLT/ARPHDR) and network-layer numbers so capture
# dissection picks the right first layer.
conf.l2types.register(ARPHDR_ETHER, Ether)
conf.l2types.register_num2layer(ARPHDR_METRICOM, Ether)
conf.l2types.register_num2layer(ARPHDR_LOOPBACK, Ether)
conf.l2types.register_layer2num(ARPHDR_ETHER, Dot3)
conf.l2types.register(113, CookedLinux)
conf.l2types.register(144, CookedLinux) # called LINUX_IRDA, similar to CookedLinux
conf.l3types.register(ETH_P_ARP, ARP)
### Technics
@conf.commands.register
def arpcachepoison(target, victim, interval=60):
    """Poison target's cache with (your MAC,victim's IP) couple
    arpcachepoison(target, victim, [interval=60]) -> None
    """
    # Resolve the target's MAC once, then keep claiming the victim's IP
    # with our own hardware address every *interval* seconds until
    # interrupted with Ctrl-C.
    # NOTE(review): sendp is not imported by name in this module; it is
    # presumably brought in by one of the wildcard imports -- verify.
    tmac = getmacbyip(target)
    p = Ether(dst=tmac)/ARP(op="who-has", psrc=victim, pdst=target)
    try:
        while 1:
            sendp(p, iface_hint=target)
            if conf.verb > 1:
                # Progress dot straight to stdout (fd 1); Python 2 str.
                os.write(1,".")
            time.sleep(interval)
    except KeyboardInterrupt:
        pass
class ARPingResult(SndRcvList):
    """Result list for arping(): prints one responder (MAC, IP) per
    answered probe."""
    def __init__(self, res=None, name="ARPing", stats=None):
        SndRcvList.__init__(self, res, name, stats)
    def show(self):
        # Python 2 print statement; one line per (sent, received) pair.
        for s,r in self.res:
            print r.sprintf("%19s,Ether.src% %ARP.psrc%")
@conf.commands.register
def arping(net, timeout=2, cache=0, verbose=None, **kargs):
"""Send ARP who-has requests to determine which hosts are up
arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
Set cache=True if | |
<filename>contabilidad/views.py
# -*- coding: utf-8 -*-
from datetime import date, timedelta
import os
import base64
import simplejson as json
import re
import string
import locale
import pdfkit
import logging
import xlwt
from xlwt import Formula
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
from django.core.files.base import ContentFile
from django.utils.timezone import datetime
from django.utils.text import slugify
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.db.models import Q, Sum
from django import forms
from django.http import HttpResponse, JsonResponse
from django.template.loader import render_to_string
from autenticar.models import Gauser
from entidades.models import Subentidad, Cargo, Gauser_extra
from gauss.rutas import *
from gauss.funciones import usuarios_de_gauss, pass_generator, usuarios_ronda, get_dce
from contabilidad.models import Presupuesto, Partida, Asiento, Politica_cuotas, Remesa, File_contabilidad, \
Remesa_emitida, OrdenAdeudo
from autenticar.control_acceso import permiso_required
from mensajes.views import crear_aviso
from mensajes.models import Aviso
from django.urls import reverse
from django.http import HttpResponseRedirect
# Spanish locale for number/date formatting in reports; requires the
# es_ES.utf8 locale to be generated on the host (raises otherwise).
locale.setlocale(locale.LC_ALL, 'es_ES.utf8')
logger = logging.getLogger('django')
class PartidaForm(forms.ModelForm):
    """Form for a budget line item (Partida); the parent budget is set
    by the view, so it is excluded from the form."""
    class Meta:
        model = Partida
        exclude = ('presupuesto',)
class File_contabilidadForm(forms.ModelForm):
    """Upload form for the scanned receipt attached to a ledger entry."""
    class Meta:
        model = File_contabilidad
        fields = ('fichero',)
class AsientoForm(forms.ModelForm): # form accessible to end users
    """Form for a ledger entry (Asiento). The 'partida' choices are
    limited to the budget passed in via the required 'presupuesto'
    keyword argument."""
    def __init__(self, *args, **kwargs):
        # Pop the extra kwarg before ModelForm.__init__ sees it.
        self.presupuesto = kwargs.pop("presupuesto")
        super(AsientoForm, self).__init__(*args, **kwargs)
        self.fields["partida"].queryset = Partida.objects.filter(presupuesto=self.presupuesto)
    class Meta:
        model = Asiento
        fields = ('concepto', 'nombre', 'cantidad', 'partida', 'escaneo')
        widgets = { # 'concepto': forms.Textarea(attrs={'cols': 50, 'rows':4, 'class':'obligatorio'}),
            'concepto': forms.TextInput(attrs={'size': '100', 'class': 'obligatorio'}),
            # 'nombre': forms.TextInput(attrs={'class':'obligatorio','size':150}),
            'nombre': forms.Textarea(attrs={'cols': 100, 'rows': 1, 'class': 'obligatorio'}),
            'cantidad': forms.TextInput(attrs={'class': 'obligatorio', 'size': 15}),
        }
@permiso_required('acceso_presupuestos')
def presupuestos(request):
    """List and manage the entity's budgets (Presupuesto).

    POST actions: pdf_presupuesto (download a PDF report),
    add_presupuesto, mod_presupuesto, borrar_presupuesto (only allowed
    when the budget has no line items), copiar_presupuesto (copies the
    budget together with its Partida items), archivar_presupuesto and
    abrir_presupuesto (toggle the archived flag). Any other request
    renders the budget list.
    """
    g_e = request.session["gauser_extra"]
    if request.method == 'POST':
        if request.POST['action'] == 'pdf_presupuesto':
            dce = get_dce(g_e.ronda.entidad, 'Configuración de informes de contabilidad')
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            partidas = Partida.objects.filter(presupuesto=presupuesto)
            gastos = partidas.filter(tipo='GASTO').aggregate(gasto_total=Sum('cantidad'))
            ingresos = partidas.filter(tipo='INGRE').aggregate(ingreso_total=Sum('cantidad'))
            fichero = 'presupuesto_%s_%s' % (g_e.ronda.entidad.id, presupuesto.id)
            c = render_to_string('presupuesto2pdf.html',
                                 {'presupuesto': presupuesto, 'partidas': partidas, 'gastos': gastos,
                                  'ingresos': ingresos})
            fich = pdfkit.from_string(c, False, dce.get_opciones)
            logger.info('%s, pdf_presupuesto' % g_e)
            response = HttpResponse(fich, content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename=' + fichero + '.pdf'
            return response
        elif request.POST['action'] == 'add_presupuesto':
            describir = request.POST['describir']
            # Very short descriptions are replaced with a default text.
            # Fix: "presupesto" -> "presupuesto" (user-facing typo).
            describir = 'No hay descripción para este presupuesto.' if len(describir) < 5 else describir
            Presupuesto.objects.create(nombre=request.POST['nombre'], describir=describir, entidad=g_e.ronda.entidad)
        elif request.POST['action'] == 'mod_presupuesto':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            presupuesto.nombre = request.POST['nombre']
            presupuesto.describir = request.POST['describir']
            presupuesto.save()
        elif request.POST['action'] == 'borrar_presupuesto':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            # Only empty budgets may be deleted; otherwise warn the user.
            if Partida.objects.filter(presupuesto=presupuesto).count() == 0:
                presupuesto.delete()
            else:
                crear_aviso(request, False, 'El presupuesto no se puede borrar porque contiene partidas')
        elif request.POST['action'] == 'copiar_presupuesto':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            # Deep copy: new budget plus a copy of each line item.
            p = Presupuesto.objects.create(entidad=g_e.ronda.entidad, nombre=presupuesto.nombre + ' (copia)',
                                           describir=presupuesto.describir)
            partidas = Partida.objects.filter(presupuesto=presupuesto)
            for partida in partidas:
                Partida.objects.create(presupuesto=p, tipo=partida.tipo, nombre=partida.nombre,
                                       cantidad=partida.cantidad)
        elif request.POST['action'] == 'archivar_presupuesto':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            presupuesto.archivado = True
            presupuesto.save()
        elif request.POST['action'] == 'abrir_presupuesto':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            presupuesto.archivado = False
            presupuesto.save()
    presupuestos = Presupuesto.objects.filter(entidad=g_e.ronda.entidad)
    return render(request, "presupuestos_list.html",
                  {
                      'formname': 'Presupuestos',
                      'iconos':
                          ({'tipo': 'button', 'nombre': 'check', 'texto': 'Aceptar',
                            'title': 'Aceptar los cambios realizados', 'permiso': 'edita_presupuestos'},
                           {'tipo': 'button', 'nombre': 'plus', 'texto': 'Presupuesto',
                            'title': 'Crear un nuevo presupuesto',
                            'permiso': 'crea_presupuestos'},
                           {'tipo': 'button', 'nombre': 'list-alt', 'texto': 'Presupuestos',
                            'title': 'Mostrar la lista de presupuestos',
                            'permiso': 'edita_presupuestos'},),
                      'presupuestos': presupuestos,
                      'avisos': Aviso.objects.filter(usuario=request.session["gauser_extra"],
                                                     aceptado=False),
                  })
@permiso_required('acceso_gastos_ingresos')
def presupuesto(request, id=False):
    """Detail view for a single budget: its line items and totals.

    POST action pdf_presupuesto downloads a PDF report. Redirects to
    the budget list when no id is given or the budget does not belong
    to the user's entity.
    """
    if id:
        g_e = request.session["gauser_extra"]
        try:
            presupuesto = Presupuesto.objects.get(entidad=g_e.ronda.entidad, id=id)
        except:
            # Missing or foreign budget: back to the list view.
            return HttpResponseRedirect(reverse('presupuestos'))
    else:
        return HttpResponseRedirect(reverse('presupuestos'))
    partidas = Partida.objects.filter(presupuesto=presupuesto)
    if request.method == 'POST':
        if request.POST['action'] == 'pdf_presupuesto':
            dce = get_dce(g_e.ronda.entidad, 'Configuración de informes de contabilidad')
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            partidas = Partida.objects.filter(presupuesto=presupuesto)
            gastos = partidas.filter(tipo='GASTO').aggregate(gasto_total=Sum('cantidad'))
            ingresos = partidas.filter(tipo='INGRE').aggregate(ingreso_total=Sum('cantidad'))
            fichero = 'presupuesto_%s_%s' % (g_e.ronda.entidad.id, g_e.ronda.id)
            c = render_to_string('presupuesto2pdf.html',
                                 {'presupuesto': presupuesto, 'partidas': partidas, 'gastos': gastos,
                                  'ingresos': ingresos})
            fich = pdfkit.from_string(c, False, dce.get_opciones)
            response = HttpResponse(fich, content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename=' + fichero + '.pdf'
            return response
    # Totals per line-item type for the template.
    gastos = partidas.filter(tipo='GASTO').aggregate(gasto_total=Sum('cantidad'))
    ingresos = partidas.filter(tipo='INGRE').aggregate(ingreso_total=Sum('cantidad'))
    return render(request, "presupuesto.html",
                  {
                      'formname': 'Presupuesto',
                      'iconos':
                          ({'tipo': 'button', 'nombre': 'check', 'texto': 'Aceptar',
                            'title': 'Aceptar los cambios realizados', 'permiso': 'edita_presupuestos'},
                           {'tipo': 'button', 'nombre': 'plus', 'texto': 'Partida',
                            'title': 'Añadir nueva partida al presupuesto',
                            'permiso': 'edita_presupuestos'},
                           {'tipo': 'button', 'nombre': 'pencil', 'texto': 'Editar',
                            'title': 'Editar la partida para su modificación',
                            'permiso': 'edita_presupuestos'},
                           {'tipo': 'button', 'nombre': 'trash-o', 'texto': 'Borrar',
                            'title': 'Borrar la partida seleccionada', 'permiso': 'edita_presupuestos'},
                           {'tipo': 'button', 'nombre': 'file-text-o', 'texto': 'PDF',
                            'title': 'Generar documento pdf del presupuesto',
                            'permiso': 'edita_presupuestos'}),
                      'presupuesto': presupuesto,
                      'partidas': partidas,
                      'gastos': gastos,
                      'ingresos': ingresos,
                      'avisos': Aviso.objects.filter(usuario=request.session["gauser_extra"],
                                                     aceptado=False),
                  })
@login_required()
def presupuesto_ajax(request):
    """AJAX endpoint for budget line-item CRUD.

    Actions: presupuesto_data (JSON name/description), add_partida /
    mod_partida (return rendered forms), del_partida,
    save_partida_added, save_partida_modified (return the re-rendered
    item list with totals).
    NOTE(review): request.is_ajax() is removed in newer Django
    versions -- confirm the project's Django version.
    """
    if request.is_ajax():
        g_e = request.session['gauser_extra']
        if request.POST['action'] == 'presupuesto_data':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            data = {'nombre': presupuesto.nombre, 'describir': presupuesto.describir}
            return HttpResponse(json.dumps(data))
        elif request.POST['action'] == 'add_partida':
            form = PartidaForm()
            html = render_to_string("add_partida.html", {'form': form, }, request=request)
            return HttpResponse(html)
        elif request.POST['action'] == 'mod_partida':
            partida = Partida.objects.get(id=request.POST['id'])
            form = PartidaForm(instance=partida)
            html = render_to_string("mod_partida.html", {'form': form, }, request=request)
            return HttpResponse(html)
        elif request.POST['action'] == 'del_partida':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            Partida.objects.get(id=request.POST['id']).delete()
            partidas = Partida.objects.filter(presupuesto=presupuesto)
            gastos = partidas.filter(tipo='GASTO').aggregate(gasto_total=Sum('cantidad'))
            ingresos = partidas.filter(tipo='INGRE').aggregate(ingreso_total=Sum('cantidad'))
            html = render_to_string("list_partidas.html",
                                    {'partidas': partidas, 'gastos': gastos, 'ingresos': ingresos, },
                                    request=request)
            return HttpResponse(html)
        elif request.POST['action'] == 'save_partida_added':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            Partida.objects.create(presupuesto=presupuesto, tipo=request.POST['tipo'],
                                   nombre=request.POST['nombre'], cantidad=request.POST['cantidad'])
            partidas = Partida.objects.filter(presupuesto=presupuesto)
            gastos = partidas.filter(tipo='GASTO').aggregate(gasto_total=Sum('cantidad'))
            ingresos = partidas.filter(tipo='INGRE').aggregate(ingreso_total=Sum('cantidad'))
            html = render_to_string("list_partidas.html",
                                    {'partidas': partidas, 'gastos': gastos, 'ingresos': ingresos, },
                                    request=request)
            return HttpResponse(html)
        elif request.POST['action'] == 'save_partida_modified':
            presupuesto = Presupuesto.objects.get(id=request.POST['id_presupuesto'], entidad=g_e.ronda.entidad)
            partida = Partida.objects.filter(id=request.POST['partida_id'])
            partida.update(presupuesto=presupuesto, tipo=request.POST['tipo'], nombre=request.POST['nombre'],
                           cantidad=request.POST['cantidad'])
            # update() alone would not refresh the auto-updated
            # modification timestamp, hence the explicit save().
            partida[0].save()
            partidas = Partida.objects.filter(presupuesto=presupuesto)
            gastos = partidas.filter(tipo='GASTO').aggregate(gasto_total=Sum('cantidad'))
            ingresos = partidas.filter(tipo='INGRE').aggregate(ingreso_total=Sum('cantidad'))
            html = render_to_string("list_partidas.html",
                                    {'partidas': partidas, 'gastos': gastos, 'ingresos': ingresos, },
                                    request=request)
            return HttpResponse(html)
@permiso_required('acceso_gastos_ingresos')
def gastos_ingresos(request):
    """Expenses/income overview for a budget.

    POST actions: pdf_gastos_ingresos (PDF of all entries grouped by
    line item) and bajar_justificante (download a scanned receipt).
    GET pre-renders the entry list only when the entity has exactly one
    open (non-archived) budget.
    """
    g_e = request.session["gauser_extra"]
    if request.method == 'POST':
        presupuestos = None
        presupuesto = Presupuesto.objects.get(entidad=g_e.ronda.entidad, id=request.POST['id_presupuesto'])
        asientos = Asiento.objects.filter(partida__presupuesto=presupuesto)
        partidas_gastos = Partida.objects.filter(presupuesto=presupuesto, tipo='GASTO')
        partidas_ingresos = Partida.objects.filter(presupuesto=presupuesto, tipo='INGRE')
        if request.POST['action'] == 'pdf_gastos_ingresos':
            dce = get_dce(g_e.ronda.entidad, 'Configuración de informes de contabilidad')
            # Build [line item, its entries, its total] triplets for the
            # PDF template, separately for expenses and income.
            gi_gastos = []
            for partida in partidas_gastos:
                asientos_partida = Asiento.objects.filter(partida=partida)
                total_partida = asientos_partida.aggregate(total=Sum('cantidad'))
                gi_gastos.append([partida, asientos_partida, total_partida['total']])
            gi_ingresos = []
            for partida in partidas_ingresos:
                asientos_partida = Asiento.objects.filter(partida=partida)
                total_partida = asientos_partida.aggregate(total=Sum('cantidad'))
                gi_ingresos.append([partida, asientos_partida, total_partida['total']])
            fichero = 'gastos_ingresos_%s_%s' % (g_e.ronda.entidad.id, g_e.ronda.id)
            c = render_to_string('gastos_ingresos2pdf.html', {
                'gi_ingresos': gi_ingresos,
                'gi_gastos': gi_gastos,
                'g_total': asientos.filter(partida__tipo='GASTO').aggregate(total=Sum('cantidad'))['total'],
                'i_total': asientos.filter(partida__tipo='INGRE').aggregate(total=Sum('cantidad'))['total'],
                'pg_total': partidas_gastos.aggregate(total=Sum('cantidad'))['total'],
                'pi_total': partidas_ingresos.aggregate(total=Sum('cantidad'))['total']})
            fich = pdfkit.from_string(c, False, dce.get_opciones)
            response = HttpResponse(fich, content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename=' + fichero + '.pdf'
            return response
        if request.POST['action'] == 'bajar_justificante':
            asiento = Asiento.objects.get(id=request.POST['asiento_id'])
            # Map the media URL back onto the filesystem path.
            url_file = asiento.escaneo.fichero.url
            fichero = url_file.replace('/media/contabilidad/', MEDIA_CONTABILIDAD)
            response = HttpResponse(open(fichero, 'rb'))
            response['Content-Disposition'] = 'attachment; filename=' + asiento.escaneo.fich_name
            return response
    else:
        presupuestos = Presupuesto.objects.filter(entidad=g_e.ronda.entidad, archivado=False)
        # With several (or zero) open budgets the user must pick one
        # first; with exactly one, render its entries right away.
        if len(presupuestos) != 1:
            presupuesto = None
            data = ''
        else:
            presupuesto = presupuestos[0]
            asientos = Asiento.objects.filter(partida__presupuesto=presupuesto)
            data = render_to_string("list_asientos.html", {
                'gi_ingresos': asientos.filter(partida__tipo='INGRE').reverse().order_by('modificado'),
                'gi_gastos': asientos.filter(partida__tipo='GASTO').reverse().order_by('modificado'),
                'g_total': asientos.filter(partida__tipo='GASTO').aggregate(total=Sum('cantidad'))['total'],
                'i_total': asientos.filter(partida__tipo='INGRE').aggregate(total=Sum('cantidad'))['total'],
            }, request=request)
    iconos = ({'tipo': 'button', 'nombre': 'list-alt', 'texto': 'Ingresos/Gastos', 'permiso': 'edita_gastos_ingresos',
               'title': 'Mostrar la lista de gastos e ingresos'},
              {'tipo': 'button', 'nombre': 'check', 'texto': 'Aceptar', 'title': 'Guardar los cambios realizados',
               'permiso': 'edita_gastos_ingresos'},
              {'tipo': 'button', 'nombre': 'plus', 'texto': 'Gasto/Ingreso', 'title': 'Añadir nuevo gasto o ingreso',
               'permiso': 'edita_gastos_ingresos'},
              {'tipo': 'button', 'nombre': 'pencil', 'texto': 'Editar',
               'title': 'Editar el gasto/ingreso para su modificación', 'permiso': 'edita_gastos_ingresos'},
              {'tipo': 'button', 'nombre': 'trash-o', 'texto': 'Borrar',
               'title': 'Borrar el ingreso o gasto seleccionado', 'permiso': 'edita_gastos_ingresos'},
              {'tipo': 'button', 'nombre': 'file-pdf-o', 'texto': 'PDF', 'permiso': 'pdf_gastos_ingresos',
               'title': 'Generar documento pdf con los gastos e ingresos registrados'})
    return render(request, "gastos_ingresos.html",
                  {
                      'formname': 'Ingreso_gasto',
                      'iconos': iconos,
                      'data': data,
                      'presupuesto': presupuesto,
                      'presupuestos': presupuestos,
                      'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
                  })
@login_required()
def gastos_ingresos_ajax(request):
if request.is_ajax():
g_e = request.session["gauser_extra"]
presupuesto = Presupuesto.objects.get(entidad=g_e.ronda.entidad, id=request.POST['id_presupuesto'])
if request.POST['action'] == 'muestra_presupuesto':
asientos = Asiento.objects.filter(partida__presupuesto=presupuesto)
gi_gastos = asientos.filter(partida__tipo='GASTO').reverse().order_by('modificado')
gi_ingresos = asientos.filter(partida__tipo='INGRE').reverse().order_by('modificado')
data = render_to_string("list_asientos.html", {
'gi_ingresos': gi_ingresos,
'gi_gastos': gi_gastos,
'g_total': asientos.filter(partida__tipo='GASTO').aggregate(total=Sum('cantidad'))['total'],
'i_total': asientos.filter(partida__tipo='INGRE').aggregate(total=Sum('cantidad'))['total'],
}, request=request)
return HttpResponse(data)
elif request.POST['action'] == 'borrar_asientos':
id_asientos = json.loads(request.POST['id_asientos'])
asientos = Asiento.objects.filter(id__in=id_asientos)
for asiento in asientos:
file_contabilidad = asiento.escaneo
asiento.delete()
if file_contabilidad:
os.remove(RUTA_BASE + file_contabilidad.fichero.url)
file_contabilidad.delete()
data = json.dumps({'borrados': id_asientos})
return HttpResponse(data)
elif request.POST['action'] == 'add_gasto_ingreso':
form1 = AsientoForm(presupuesto=presupuesto)
form2 = File_contabilidadForm()
data = render_to_string("add_gasto_ingreso.html", {'form1': form1, 'form2': form2},
request=request)
return HttpResponse(data)
elif request.POST['action'] == 'mod_gasto_ingreso':
asiento = Asiento.objects.get(id=request.POST['id'])
form1 = AsientoForm(instance=asiento, presupuesto=asiento.partida.presupuesto)
form2 = File_contabilidadForm(instance=asiento.escaneo)
data = render_to_string("add_gasto_ingreso.html", {'form1': form1, 'form2': form2},
request=request)
return HttpResponse(data)
elif request.POST['action'] == 'save_added_gasto_ingreso':
if 'fichero' in request.FILES:
fichero = request.FILES['fichero']
file_contabilidad = File_contabilidad.objects.create(entidad=g_e.ronda.entidad, fichero=fichero,
content_type=fichero.content_type)
else:
file_contabilidad = None
partida = Partida.objects.get(id=request.POST['partida'])
Asiento.objects.create(escaneo=file_contabilidad, partida=partida,
nombre=request.POST['nombre'], concepto=request.POST['concepto'],
cantidad=request.POST['cantidad'])
asientos = Asiento.objects.filter(partida__presupuesto=presupuesto)
gi_gastos = asientos.filter(partida__tipo='GASTO').reverse().order_by('modificado')
gi_ingresos = asientos.filter(partida__tipo='INGRE').reverse().order_by('modificado')
data = render_to_string("list_asientos.html", {
'gi_ingresos': gi_ingresos,
'gi_gastos': gi_gastos,
'g_total': asientos.filter(partida__tipo='GASTO').aggregate(total=Sum('cantidad'))['total'],
'i_total': asientos.filter(partida__tipo='INGRE').aggregate(total=Sum('cantidad'))['total'],
}, request=request)
return HttpResponse(data)
elif request.POST['action'] == 'save_mod_gasto_ingreso':
asiento = Asiento.objects.filter(id=request.POST['asiento_id'],
partida__presupuesto__entidad=g_e.ronda.entidad)
data = {'partida': request.POST['partida'], 'concepto': request.POST['concepto'],
'nombre': request.POST['nombre'], 'cantidad': request.POST['cantidad']}
if 'fichero' in request.FILES:
file_contabilidad_antiguo = asiento[0].escaneo
fichero = request.FILES['fichero']
file_contabilidad = File_contabilidad.objects.create(entidad=g_e.ronda.entidad, fichero=fichero,
content_type=fichero.content_type)
data['escaneo'] = file_contabilidad
asiento.update(**data)
if file_contabilidad_antiguo:
os.remove(RUTA_BASE + file_contabilidad_antiguo.fichero.url)
file_contabilidad_antiguo.delete()
else:
# | |
import os
class ATM:
    def __init__(self, accountListFile, transactionSummary):
        """Set up per-session state, load the account list from
        *accountListFile*, then immediately run the interactive loop.

        NOTE(review): the constructor blocks on user input because it
        calls interface() -- constructing an ATM runs a full session.
        """
        self.accountList = []
        self.transactionList = []
        self.depositRecord = {} # key: account, value: deposited amount
        self.withdrawRecord = {} # key: account, value: withdrawn amount
        self.transferRecord = {} # key: account, value: transferred amount
        self.readList(accountListFile) # read account list to accountList
        # Transactions are written to this file on logout.
        self.writeTarget = transactionSummary
        self.interface()
# read account list and append it to accountList
def readList(self,fileName):
with open(fileName, 'r') as f:
line = f.readline()
while line:
self.accountList.append(line.strip())
line = f.readline()
# takes amount in cent such as "001"
# convert it to float, such as 0.01
def inputConvert(self, amount):
return float(amount[:-2] + "." + amount[-2:])
# main interface
def interface(self):
while True: # loop until accept 'logout' command
self.greeting()
function = input()
if function == "deposit":
signal = self.functionDeposit()
if signal == 0: break
elif function == "withdraw":
signal = self.functionWithdraw()
if signal == 0: break
elif function == "transfer":
signal = self.functionTransfer()
if signal == 0: break
elif function == "createacct":
signal = self.functionCreateAccount()
if signal == 0: break
elif function == "deleteacct":
signal = self.functionDeleteAccount()
if signal == 0: break
elif function == "logout":
break
else:
print("Check your input, and try again!")
self.functionLogout()
# print greeting when login to ATM mode
# overriden in agent.py
def greeting(self):
print("Hello, welcome to the ATM mode, please type the transaction you want to make: ")
# deposit logic frame
# get valid toAccount and amount,
# then append the transaction to transactionList
def functionDeposit(self):
toAccount = input("Enter your account please: ")
if toAccount == "logout":
return 0
while (not self.verifyAccount(toAccount) or not self.existAccount(toAccount)): # invalid format or non-exist
toAccount = input("--> ")
if toAccount == "logout":
return 0
amount = input("Enter the amount of money you want to deposit: ")
if amount == "logout":
return 0
while not self.verifyDepositAmount(toAccount,amount): # invalid amount
amount = input("--> ")
if amount == "logout":
return 0
transaction = "DEP {} {} 0000000 ***\n".format(toAccount, amount)
self.transactionList.append(transaction) # append transaction info to the transactionList
self.recordTransaction("deposit", toAccount, self.inputConvert(amount)) # record transaction
print("<-- Your transaction has been successfully made! -->")
print("=="*34)
return 1
# withdraw logic frame
# get valid fromAccount and amount,
# then append the transaction to the transactionList.
def functionWithdraw(self):
fromAccount = input("Enter your account please: ")
if fromAccount == "logout":
return 0
while (not self.verifyAccount(fromAccount) or not self.existAccount(fromAccount)): # invalid format or non-exist
fromAccount = input("--> ")
if fromAccount == "logout":
return 0
amount = input("Enter the amount of money you want to withdraw: ")
if amount == "logout":
return 0
while not self.verifyWithdrawAmount(fromAccount, amount): # invalid amount
amount = input("--> ")
if amount == "logout":
return 0
transaction = "WDR 0000000 {} {} ***\n".format(amount, fromAccount)
self.transactionList.append(transaction) # append transaction info to the transactionList
self.recordTransaction("withdraw", fromAccount, self.inputConvert(amount)) # record transaction
print("<-- Your transaction has been successfully made! -->")
print("=="*34)
return 1
# transfer logic frame
# get valid fromAccount, toAccount, and amount,
# then append the transaction to the transactionList.
def functionTransfer(self):
fromAccount = input("Enter your account please: ")
if fromAccount == "logout":
return 0
while (not self.verifyAccount(fromAccount) or not self.existAccount(fromAccount)): # invalid format or non-exist
fromAccount = input("--> ")
if fromAccount == "logout":
return 0
toAccount = input("Enter the payee account please: ")
if toAccount == "logout":
return 0
while (not self.verifyAccount(toAccount) or not self.existAccount(toAccount)): # invalid format or non-exist
toAccount = input("--> ")
if toAccount == "logout":
return 0
amount = input("Enter the amount of money you want to transfer: ")
if amount == "logout":
return 0
while not self.verifyTransferAmount(fromAccount, amount): # invalid amount
amount = input("--> ")
if amount == "logout":
return 0
transaction = "XFR {} {} {} ***\n".format(toAccount, amount, fromAccount )
self.transactionList.append(transaction) # append transaction info to transactionList
self.recordTransaction("transfer", fromAccount, self.inputConvert(amount)) # record transaction
print("<-- Your transaction has been successfully made! -->")
print("==" * 34)
return 1
# createacct (not supported in ATM mode)
# overriden in agent.py
def functionCreateAccount(self):
print("The operation is not supported in ATM mode")
return 1
# deleteacct (not supported in ATM mode)
# overriden in agent.py
def functionDeleteAccount(self):
print("The operation is not supported in ATM mode")
return 1
# logout
# write transaction summary file here
def functionLogout(self):
writeDir = os.path.dirname(self.writeTarget)
if not os.path.exists(writeDir):
os.makedirs(writeDir)
try:
f = open(self.writeTarget, "w")
self.transactionList.append("EOS") # transaction summary file end with "EOS"
f.writelines(self.transactionList) # write transaction summary file
f.close()
except:
print("file cannot found")
return 0
# record transaction in corresponding dict
# @transaction: type of transaction
# @account: account number
# @amount: the amount of the type of transaction
def recordTransaction(self, transaction, account, amount):
if transaction == "deposit":
currentAmount = self.depositRecord.get(account)
if currentAmount is None: # uninitialized amount will be None
currentAmount = 0
updatedAmount = currentAmount + amount
self.depositRecord[account] = updatedAmount
elif transaction == "withdraw":
currentAmount = self.withdrawRecord.get(account)
if currentAmount is None: # uninitialized amount will be None
currentAmount = 0
updatedAmount = currentAmount + amount
self.withdrawRecord[account] = updatedAmount
elif transaction == "transfer":
currentAmount = self.transferRecord.get(account)
if currentAmount is None: # uninitialized amount will be None
currentAmount = 0
updatedAmount = currentAmount + amount
self.transferRecord[account] = updatedAmount
# verify the account if is valid or not
# length exactly 7 digits and can not start with '0'
def verifyAccount(self,account):
if len(account) != 7:
print("The account has to be exactly 7 digits")
return False
elif account[0] == '0':
# change from the to The
print("The account cannot start with '0'")
return False
elif not account.isdigit():
print("The account cannot contain other characters")
return False
return True
# verify the amount is valid
# length is valid and only contain number
def verifyAmount(self, amount):
if len(amount) < 3 or len(amount) > 8:
print("Invalid amount, please try again!")
return False
elif not amount.isdigit():
print("Invalid amount, please try again!")
return False
elif amount[0] == '0' and len(amount) > 3: # such as "0111"
print("Invalid amount, please try again!")
return False
else:
return True
# further verfiy the deposit amount
# each deposit within $2,000
# daily deposit limit is $5,000
def verifyDepositAmount(self, account, amount):
if self.verifyAmount(amount):
if self.inputConvert(amount) > 2000: # each time deposit should <= $2,000
print("The limit of each deposit is $2,000, please retype: ")
return False
currentAmount = self.depositRecord.get(account)
if currentAmount is None or 5000 >= currentAmount + self.inputConvert(amount): # total deposit should <= $5,000
return True
else:
print("The daily limit of deposit is $5,000, ", end="")
print(f"you have used ${self.depositRecord.get(account):.2f}, please try again.")
return False
return False
# further verify the withdraw amount
# each withdraw within $1,000
# daily withdraw limit is $5,000
def verifyWithdrawAmount(self, account, amount):
if self.verifyAmount(amount):
if self.inputConvert(amount) > 1000: # each time withdraw should <= $1,000
print("The limit of each withdraw is $1,000, please retype: ")
return False
currentAmount = self.withdrawRecord.get(account)
if currentAmount is None or 5000 >= currentAmount + self.inputConvert(amount): # total withdraw amount should <= $5,000
return True
else:
print("The daliy limit of withdraw is $5,000, ", end="")
print(f"you have withdrawn ${self.withdrawRecord.get(account):.2f}, please try again.")
return False
return False
# further verify the transfer amount
# each transfer within $10,000
# daily transfer limit is $10,000
def verifyTransferAmount(self, fromAccount, amount):
if self.verifyAmount(amount):
if self.inputConvert(amount) > 10000: # each transfer should <= $10,000
print("The limit of each transfer is $10,000, please retype: ")
return False
currentAmount = self.transferRecord.get(fromAccount)
if currentAmount is None or 10000 >= currentAmount + self.inputConvert(amount): # total transfer should <= $10,000
return True
else:
print("The daliy limit of transfer is $10,000, ", end="")
print(f"you have transferred ${self.transferRecord.get(fromAccount):.2f}, please try again.")
return False
# binary search see if a account in the account list
def existAccount(self,account):
start = 0
end = len(self.accountList) - 2
while start | |
from typing import List, Optional, Text, Tuple
import numpy as np
from numpy import ndarray
from pandas import DataFrame, Timedelta
from pandas.core.series import Series
from pymove.preprocessing import filters
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
from pymove.utils.distances import haversine
from pymove.utils.log import progress_bar
def union_poi_bank(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Collapse every bank-related POI category into the single
    category 'banks', in place.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union bank categories to one category')
    print('... There are {} -- {}'.format(data[label_poi].nunique(), label_poi))
    bank_categories = (
        'bancos_filiais',
        'bancos_agencias',
        'bancos_postos',
        'bancos_PAE',
        'bank',
    )
    mask = data[label_poi].isin(bank_categories)
    matching_rows = data[mask].index
    data.at[matching_rows, label_poi] = 'banks'
def union_poi_bus_station(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Collapse every bus-station-like POI category into the single
    category 'bus_station', in place.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union bus station categories to one category')
    bus_like = ('transit_station', 'pontos_de_onibus')
    mask = data[label_poi].isin(bus_like)
    matching_rows = data[mask].index
    data.at[matching_rows, label_poi] = 'bus_station'
def union_poi_bar_restaurant(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Collapse the 'restaurant' and 'bar' POI categories into the single
    category 'bar-restaurant', in place.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union restaurant and bar categories to one category')
    mask = data[label_poi].isin(('restaurant', 'bar'))
    matching_rows = data[mask].index
    data.at[matching_rows, label_poi] = 'bar-restaurant'
def union_poi_parks(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Collapse the park-related POI categories into the single
    category 'parks', in place.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union parks categories to one category')
    mask = data[label_poi].isin(('pracas_e_parques', 'park'))
    matching_rows = data[mask].index
    data.at[matching_rows, label_poi] = 'parks'
def union_poi_police(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Rename the 'distritos_policiais' POI category to 'police', in place.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union distritos policies and police categories')
    mask = data[label_poi] == 'distritos_policiais'
    matching_rows = data[mask].index
    data.at[matching_rows, label_poi] = 'police'
def join_collective_areas(
    gdf_: DataFrame, gdf_rules_: DataFrame, label_geometry: Optional[Text] = GEOMETRY
):
    """
    Flag trajectory points that fall inside a collective area.

    Adds a boolean VIOLATING column to gdf_ which is True for every
    point whose geometry intersects one of the rule polygons.

    Parameters
    ----------
    gdf_ : geopandas.GeoDataFrame
        The input trajectory data
    gdf_rules_ : geopandas.GeoDataFrame
        The input collective areas data
    label_geometry : str, optional
        Label of the geometry column, by default GEOMETRY
    """
    print('Integration between trajectories and collectives areas')
    gdf_[VIOLATING] = False
    for area in progress_bar(gdf_rules_[label_geometry].unique()):
        inside = gdf_[label_geometry].intersects(area)
        gdf_.at[gdf_[inside].index, VIOLATING] = True
def _reset_and_creates_id_and_lat_lon(
data: DataFrame,
df_pois: DataFrame,
lat_lon_poi: Optional[bool] = True,
reset_index: Optional[bool] = True
) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes, returns the minimum distance
between the two dataframes, and return their respective variables
(id, tags, latitude and longitude).
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
lat_lon_poi : bool, optional
Flag to determine if the ids and tags is of size equivalent to df_pois,
by default True
reset_index : bool, optional
Flag for reset index of the df_pois and data dataframes before the join,
by default True
Returns
-------
distances, ids, tags, lat, lon: arrays with default values for join operation
"""
if reset_index:
print('... Resetting index to operation...')
data.reset_index(drop=True, inplace=True)
df_pois.reset_index(drop=True, inplace=True)
# create numpy array to store new column to DataFrame of movement objects
distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
ids = np.full(data.shape[0], '', dtype='object_')
tags = np.full(data.shape[0], '', dtype='object_')
# creating lat and lon array to operation
if lat_lon_poi:
lat = np.full(df_pois.shape[0], np.Infinity, dtype=np.float64)
lon = np.full(df_pois.shape[0], np.Infinity, dtype=np.float64)
else:
lat = np.full(data.shape[0], np.Infinity, dtype=np.float64)
lon = np.full(data.shape[0], np.Infinity, dtype=np.float64)
return distances, ids, tags, lat, lon
def _reset_set_window__and_creates_event_id_type(
data: DataFrame, df_events: DataFrame, label_date: Text, time_window: int
) -> Tuple[Series, Series, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes, set time window, and returns
the current distance between the two dataframes, and return their
respective variables (event_id, event_type).
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input event point of interest data.
label_date : str
Label of data referring to the datetime.
time_window : int
Number of seconds of the time window.
Returns
-------
window_starts, window_ends, current_distances, event_id, event_type
"""
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_events.reset_index(drop=True, inplace=True)
# compute windows time
window_starts = data[label_date] - Timedelta(seconds=time_window)
window_ends = data[label_date] + Timedelta(seconds=time_window)
# create vector to store distances
current_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
event_type = np.full(data.shape[0], '', dtype='object_')
event_id = np.full(data.shape[0], '', dtype='object_')
return window_starts, window_ends, current_distances, event_id, event_type
def _reset_set_window_and_creates_event_id_type_all(
data: DataFrame, df_events: DataFrame, label_date: Text, time_window: int
) -> Tuple[Series, Series, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes, set time window, and returns
the current distance between the two dataframes, and return their
respective variables (event_id, event_type).
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input event point of interest data.
label_date : str
Label of data referring to the datetime.
time_window : Int
Number of seconds of the time window.
Returns
-------
window_starts, window_ends, current_distances, event_id, event_type
arrays with default values for join operation
"""
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_events.reset_index(drop=True, inplace=True)
# compute windows time
window_starts = data[label_date] - Timedelta(seconds=time_window)
window_ends = data[label_date] + Timedelta(seconds=time_window)
# create vector to store distances
current_distances = np.full(
data.shape[0], None, dtype=np.ndarray
)
event_type = np.full(data.shape[0], None, dtype=np.ndarray)
event_id = np.full(data.shape[0], None, dtype=np.ndarray)
return window_starts, window_ends, current_distances, event_id, event_type
def join_with_pois(
    data: DataFrame,
    df_pois: DataFrame,
    label_id: Optional[Text] = TRAJ_ID,
    label_poi_name: Optional[Text] = NAME_POI,
    reset_index: Optional[bool] = True
):
    """
    Performs the integration between trajectories and points
    of interest, generating two new columns referring to the
    name and the distance from the point of interest closest
    to each point of the trajectory.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_pois : DataFrame
        The input point of interest data.
    label_id : str, optional
        Label of df_pois referring to the Point of Interest id, by default TRAJ_ID
    label_poi_name : str, optional
        Label of df_pois referring to the Point of Interest name, by default NAME_POI
    reset_index : bool, optional
        Flag for reset index of the df_pois and data dataframes before the join,
        by default True

    Notes
    -----
    Fixes over the original: reset_index was annotated Optional[Text]
    although it is a bool flag; the POI coordinate arrays are hoisted out
    of the loop; the nearest distance is read via the argmin index instead
    of scanning the distances array a second time with np.min.
    """
    print('Integration with POIs...')
    values = _reset_and_creates_id_and_lat_lon(data, df_pois, True, reset_index)
    current_distances, ids_POIs, tag_POIs, lat_user, lon_user = values
    # loop-invariant: POI coordinates do not change per trajectory point
    lat_pois = df_pois[LATITUDE].values
    lon_pois = df_pois[LONGITUDE].values
    for idx, row in progress_bar(data.iterrows(), total=len(data)):
        # broadcast the current point against every POI
        lat_user.fill(row[LATITUDE])
        lon_user.fill(row[LONGITUDE])
        distances = np.float64(haversine(lat_user, lon_user, lat_pois, lon_pois))
        # nearest POI: find the index once and reuse it
        index_min = np.argmin(distances)
        current_distances[idx] = distances[index_min]
        ids_POIs[idx] = df_pois.at[index_min, label_id]
        tag_POIs[idx] = df_pois.at[index_min, label_poi_name]
    data[ID_POI] = ids_POIs
    data[DIST_POI] = current_distances
    data[NAME_POI] = tag_POIs
    print('Integration with POI was finalized')
def join_with_pois_optimizer(
data,
df_pois: DataFrame,
label_id: Optional[Text] = TRAJ_ID,
label_poi_name: Optional[Text] = NAME_POI,
dist_poi: Optional[List] = None,
reset_index: Optional[Text] = True
):
"""
Performs the integration between trajectories and points
of interest, generating two new columns referring to the
name and distance from the nearest point of interest,
within the limit of distance determined by the parameter 'dist_poi',
of each point in the trajectory.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
label_id | |
""" Stripped-down, cleaner variants of PNPS allowing more general geometries """
import numpy
from dolfin import *
from collections import OrderedDict
from nanopores.tools import (CoupledProblem, solvermethods,
GeneralNonlinearProblem, GeneralLinearProblem, CoupledSolver,
GoalAdaptivePDE, meshelement)
from nanopores.models.mysolve import mesh_quality
__all__ = ["SimplePNPProblem", "SimplePBProblem", "SimpleStokesProblem",
"SimplePoissonProblem",
"PNPSHybrid", "PNPSFixedPoint", "PNPFixedPoint",
"PNPSFixedPointbV", "PNPFixedPointNonlinear"]
# --- Problems ---
class SimplePNPProblem(GeneralNonlinearProblem):
    """Stationary Poisson-Nernst-Planck system in mixed (v, cp, cm) form.

    Unknowns: electrostatic potential v and the two ion concentrations
    cp / cm, discretized with equal-order CG elements of degree k.
    """
    # BiCGStab settings serve as the template, but the linear solves inside
    # the Newton iteration are switched to a direct method here.
    method = dict(solvermethods.bicgstab)
    method["iterative"] = False

    @staticmethod
    def space(mesh, k=1):
        # FEniCS 1.6 built mixed spaces directly; newer versions require
        # constructing a MixedElement first.
        if dolfin.__version__ == "1.6.0":
            V = FunctionSpace(mesh, "CG", k)
            return MixedFunctionSpace((V, V, V))
        P1 = FiniteElement("P", meshelement(mesh), k)
        P = MixedElement((P1, P1, P1))
        return FunctionSpace(mesh, P)

    @staticmethod
    def initial_u(V, geo, phys, v0=None):
        """Initial guess: uniform bulk concentrations, or Boltzmann-distributed
        concentrations for a given initial potential v0."""
        u = Function(V)
        if v0 is None:
            u.interpolate(Constant((0.0, phys.bulkcon, phys.bulkcon)))
        else:
            W = V.sub(0).collapse()
            v = interpolate(v0, W)
            c0 = phys.bulkcon
            cp = Function(W)
            cm = Function(W)
            # Boltzmann equilibrium: c = c0 * exp(-z*v/UT) with z = +1/-1
            cp.vector()[:] = c0*numpy.exp(-v.vector()[:]/phys.UT)
            cm.vector()[:] = c0*numpy.exp(v.vector()[:]/phys.UT)
            assign(u, [v, cp, cm])
        return u

    @staticmethod
    def forms(V, geo, phys, u, ustokes=None, cyl=False):
        """Nonlinear residual L and its derivative a for Newton's method.

        ustokes : convective fluid velocity (defaults to zero);
        cyl : if True, integrate in axisymmetric (cylindrical) coordinates.
        """
        if ustokes is None:
            dim = geo.mesh.topology().dim()
            ustokes = Constant(tuple(0. for i in range(dim)))
        dx = geo.dx()
        dx_ions = geo.dx("fluid")
        n = FacetNormal(geo.mesh)
        # 2*pi*r measure factor for axisymmetric integration
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        lscale = Constant(phys.lscale)
        grad = phys.grad
        eps = geo.pwconst("permittivity")
        Dp = phys.Dp #geo.pwconst("Dp")
        Dm = phys.Dm #geo.pwconst("Dm")
        kT = Constant(phys.kT)
        q = Constant(phys.qq)
        F = Constant(phys.cFarad)
        (v, cp, cm) = split(u)
        (w, dp, dm) = TestFunctions(V)
        # ion fluxes: diffusion + electric drift (sign by valence) + convection
        Jm = -Dm*(grad(cm) - q/kT*cm*grad(v)) + cm*ustokes
        Jp = -Dp*(grad(cp) + q/kT*cp*grad(v)) + cp*ustokes
        apoisson = inner(eps*grad(v), grad(w))*r2pi*dx - F*(cp - cm)*w*r2pi*dx_ions
        aJm = inner(Jm, grad(dm))*r2pi*dx_ions
        aJp = inner(Jp, grad(dp))*r2pi*dx_ions
        # TODO: investigate "no bcs" further. in the test problem, they don't work as expected
        aNoBCp = -jump(lscale*Jp*dp*r2pi, n)*geo.dS("nocbc") - lscale*inner(Jp, n*dp)*r2pi*geo.ds("nocbc")
        aNoBCm = -jump(lscale*Jm*dm*r2pi, n)*geo.dS("nocbc") - lscale*inner(Jm, n*dm)*r2pi*geo.ds("nocbc")
        Lqvol = geo.linearRHS(w*r2pi, "volcharge")
        Lqsurf = lscale*geo.NeumannRHS(w*r2pi, "surfcharge")
        LJm = lscale*geo.NeumannRHS(dm*r2pi, "cmflux")
        LJp = lscale*geo.NeumannRHS(dp*r2pi, "cpflux")
        L = apoisson + aJm + aJp + aNoBCp + aNoBCm - Lqvol - Lqsurf - LJm - LJp
        # Newton linearization of the residual
        a = derivative(L, u)
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        # piecewise Dirichlet data for potential and both concentrations
        return geo.pwBC(V.sub(0), "v0") + \
            geo.pwBC(V.sub(1), "cp0") + \
            geo.pwBC(V.sub(2), "cm0")
class SimpleLinearPBProblem(GeneralLinearProblem):
    """Linearized Poisson-Boltzmann equation for the potential.

    The ionic charge term is linear in v (v/UT instead of sinh(v/UT)),
    so a single linear solve suffices.
    """
    method = dict(solvermethods.bicgstab)
    # tight Krylov tolerances for this linear solve
    method["kparams"].update(
        relative_tolerance = 1e-12,
        absolute_tolerance = 1e-12,
    )

    @staticmethod
    def space(mesh, k=1):
        # scalar CG space for the potential
        return FunctionSpace(mesh, 'CG', k)

    @staticmethod
    def forms(V, geo, phys):
        # phys.cyl selects axisymmetric (cylindrical) integration
        cyl = phys.cyl
        dx = geo.dx()
        dx_ions = geo.dx("fluid")
        # 2*pi*r measure factor for axisymmetric integration
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        lscale = Constant(phys.lscale)
        grad = phys.grad
        eps = geo.pwconst("permittivity")
        UT = Constant(phys.UT)
        # prefactor of the linearized ion charge density: 2*F*c_bulk
        k = Constant(2*phys.cFarad*phys.bulkcon)
        u = TrialFunction(V)
        w = TestFunction(V)
        a = inner(eps*grad(u), grad(w))*r2pi*dx + k/UT*u*w*r2pi*dx_ions
        # charge sources: volume charges plus (length-scaled) surface charges
        Lqvol = geo.linearRHS(w*r2pi, "volcharge")
        Lqsurf = lscale*geo.NeumannRHS(w*r2pi, "surfcharge")
        L = Lqvol + Lqsurf
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        # homogenized piecewise-constant Dirichlet values for "v0"
        return geo.pwconstBC(V, "v0", homogenize=True)
class SimplePBProblem(GeneralNonlinearProblem):
    """Full nonlinear Poisson-Boltzmann problem for the potential.

    Same setting as SimpleLinearPBProblem but with the nonlinear
    sinh(v/UT) ionic charge term, solved by Newton's method.
    """
    method = dict(solvermethods.bicgstab)

    @staticmethod
    def space(mesh, k=1):
        # scalar CG space for the potential
        return FunctionSpace(mesh, 'CG', k)

    @staticmethod
    def forms(V, geo, phys, u, cyl=False):
        dx = geo.dx()
        dx_ions = geo.dx("fluid")
        # degree=1 added for consistency with every sibling class (and
        # required by newer dolfin versions, which reject degree-less
        # string Expressions)
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        lscale = Constant(phys.lscale)
        grad = phys.grad
        eps = geo.pwconst("permittivity")
        UT = Constant(phys.UT)
        # prefactor of the ionic charge density: 2*F*c_bulk
        k = Constant(2*phys.cFarad*phys.bulkcon)
        w = TestFunction(V)
        apoisson = inner(eps*grad(u), grad(w))*r2pi*dx + k*sinh(u/UT)*w*r2pi*dx_ions
        # charge sources: volume charges plus (length-scaled) surface charges
        Lqvol = geo.linearRHS(w*r2pi, "volcharge")
        Lqsurf = lscale*geo.NeumannRHS(w*r2pi, "surfcharge")
        L = apoisson - Lqvol - Lqsurf
        # Newton linearization of the residual
        a = derivative(L, u)
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        # TODO: really only allow homogeneous BCs for PB?
        return geo.pwconstBC(V, "v0", homogenize=True)
class SimplePoissonProblem(GeneralLinearProblem):
    """Linear Poisson problem for the electrostatic potential.

    Optionally takes an extra volume source f integrated over the
    measure dxf.
    """
    method = dict(solvermethods.poisson)

    @staticmethod
    def space(mesh, k=1):
        # scalar CG space for the potential
        return FunctionSpace(mesh, 'CG', k)

    @staticmethod
    def forms(V, geo, phys, f=None, dxf=None, cyl=False):
        dx = geo.dx()
        # 2*pi*r measure factor for axisymmetric integration
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        lscale = Constant(phys.lscale)
        grad = phys.grad
        eps = geo.pwconst("permittivity")
        v = TrialFunction(V)
        w = TestFunction(V)
        a = inner(eps*grad(v), grad(w))*r2pi*dx
        # charge sources: volume charges plus (length-scaled) surface charges
        Lqvol = geo.linearRHS(w*r2pi, "volcharge")
        Lqsurf = lscale*geo.NeumannRHS(w*r2pi, "surfcharge")
        L = Lqvol + Lqsurf
        if f is not None:
            # optional user-supplied right-hand side f on measure dxf
            L = L + f*w*r2pi*dxf
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        return geo.pwBC(V, "v0")
class LinearSGPoissonProblem(GeneralLinearProblem):
    "Linearized Scharfetter-Gummel-type Poisson problem for fixed point PNP"
    method = dict(solvermethods.bicgstab)

    @staticmethod
    def space(mesh, k=1):
        # scalar CG space for the potential
        return FunctionSpace(mesh, 'CG', k)

    @staticmethod
    def forms(V, geo, phys, u, cp, cm, dx_ions, cyl=False):
        """Forms for one fixed-point step.

        u : previous potential iterate; cp, cm : current ion
        concentrations; dx_ions : measure of the ion-carrying region.
        """
        dx = geo.dx()
        # 2*pi*r measure factor for axisymmetric integration
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        lscale = Constant(phys.lscale)
        grad = phys.grad
        eps = geo.pwconst("permittivity")
        UT = Constant(phys.UT)
        F = Constant(phys.cFarad)
        v = TrialFunction(V)
        w = TestFunction(V)
        # the implicit v/UT*(cp+cm) term damps the fixed-point iteration;
        # the matching explicit term appears on the right-hand side below
        a = inner(eps*grad(v), grad(w))*r2pi*dx + F*v/UT*(cp + cm)*w*r2pi*dx_ions
        Lqions = F*((cp - cm) + u/UT*(cp + cm))*w*r2pi*dx_ions
        Lqvol = geo.linearRHS(w*r2pi, "volcharge")
        Lqsurf = lscale*geo.NeumannRHS(w*r2pi, "surfcharge")
        L = Lqions + Lqvol + Lqsurf
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        return geo.pwBC(V, "v0")
class SGPoissonProblem(GeneralNonlinearProblem):
    "Scharfetter-Gummel-type Poisson problem for fixed point PNP"
    method = dict(solvermethods.bicgstab)

    @staticmethod
    def space(mesh, k=1):
        # scalar CG space for the potential
        return FunctionSpace(mesh, 'CG', k)

    @staticmethod
    def forms(V, geo, phys, u, uold, cp, cm, dx_ions, cyl=False):
        """Nonlinear forms for one Gummel-type step.

        u : new potential unknown; uold : previous iterate; cp, cm :
        current concentrations, rescaled by exp(-+(u-uold)/UT) inside
        the charge term.
        """
        dx = geo.dx()
        # 2*pi*r measure factor for axisymmetric integration
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        lscale = Constant(phys.lscale)
        grad = phys.grad
        eps = geo.pwconst("permittivity")
        UT = Constant(phys.UT)
        F = Constant(phys.cFarad)
        w = TestFunction(V)
        apoisson = inner(eps*grad(u), grad(w))*r2pi*dx \
            - F*(exp(-(u-uold)/UT)*cp - exp((u-uold)/UT)*cm)*w*r2pi*dx_ions
        Lqvol = geo.linearRHS(w*r2pi, "volcharge")
        Lqsurf = lscale*geo.NeumannRHS(w*r2pi, "surfcharge")
        L = apoisson - (Lqvol + Lqsurf)
        # Newton linearization of the residual
        a = derivative(L, u)
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        return geo.pwBC(V, "v0")
class SimpleNernstPlanckProblem(GeneralLinearProblem):
    """Linear steady Nernst-Planck (drift-diffusion) equation for a single
    ion species of valence z in a given electric field E and fluid flow."""
    method = dict(solvermethods.bicgstab)

    @staticmethod
    def space(mesh, k=1):
        # scalar CG space for the concentration
        return FunctionSpace(mesh, "CG", k)

    @staticmethod
    def initial_u(V, phys):
        # start from the uniform bulk concentration
        u = Function(V)
        u.interpolate(Constant(phys.bulkcon))
        return u

    @staticmethod
    def forms(V, geo, phys, z, E, D=None, ustokes=None, cyl=False):
        """z : ion valence, E : electric field, D : diffusivity
        (piecewise constant from geo if not given), ustokes : convective
        velocity (zero if not given)."""
        if ustokes is None:
            dim = phys.dim
            ustokes = Constant(tuple(0. for i in range(dim)))
        if D is None:
            D = geo.pwconst("D")
        dx = geo.dx("fluid")
        # 2*pi*r measure factor for axisymmetric integration
        r2pi = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
        grad = phys.grad
        kT = Constant(phys.kT)
        q = Constant(phys.qq)
        c = TrialFunction(V)
        d = TestFunction(V)
        # flux: diffusion + electric drift + convection
        J = -D*grad(c) + z*q*D/kT*E*c + c*ustokes
        a = inner(J, grad(d))*r2pi*dx
        # homogeneous right-hand side
        L = Constant(0.)*d*dx
        return a, L

    @staticmethod
    def bcs(V, geo, phys):
        return geo.pwBC(V, "c0")
class SimpleStokesProblem(GeneralLinearProblem):
    "stabilized equal-order formulation; consistent for k=1"
    method = dict(solvermethods.stokes)

    @staticmethod
    def space(mesh, ku=1, kp=1):
        # equal-order velocity/pressure spaces (stabilized below);
        # FEniCS 1.6 builds the product space directly
        if dolfin.__version__ == "1.6.0":
            U = VectorFunctionSpace(mesh, "CG", ku)
            P = FunctionSpace(mesh, "CG", kp)
            return U*P
        U = VectorElement("P", meshelement(mesh), ku)
        P = FiniteElement("P", meshelement(mesh), kp)
        return FunctionSpace(mesh, U*P)

    @staticmethod
    def forms(V, geo, phys, f=None, cyl=False, beta=.01, conservative=True,
              fluid="fluid"):
        """Stabilized Stokes forms.

        f : body force (zero if not given); cyl : axisymmetric mode;
        beta : pressure-stabilization parameter; conservative : choose the
        conservative (symmetric-gradient) or non-conservative formulation;
        fluid : name of the fluid subdomain.
        """
        # beta = stabilization parameter, TODO: better lower in 2D?
        mesh = geo.mesh
        if f is None:
            dim = geo.mesh.topology().dim()
            f = Constant(tuple(0. for i in range(dim)))
        (u, p) = TrialFunctions(V)
        (v, q) = TestFunctions(V)
        grad = phys.grad
        div = phys.div
        lscale = phys.lscale
        dx = geo.dx(fluid)
        # radial coordinate in physical length units
        r = Expression("x[0]/L", L=Constant(lscale), degree=1)
        pi2 = Constant(2.*pi)
        h = CellSize(mesh)
        # pressure stabilization coefficient ~ h^2
        delta = Constant(beta/lscale**2)*h**2
        eta = Constant(phys.eta)
        eta2 = Constant(2*phys.eta)
        pscale = Constant(phys.pscale)
        # scale pressure
        p *= pscale
        q *= pscale
        def eps(u): return sym(grad(u))
        # conservative formulation for correct BC, with added stabilization term
        if cyl:
            a = (eta2*inner(eps(u), eps(v))*r + eta2*u[0]*v[0]/r + \
                (div(v)*r+v[0])*p + q*(u[0] + div(u)*r))*pi2*dx - \
                delta*inner(grad(p), grad(q))*r*pi2*dx
            L = inner(f, v - delta*grad(q))*r*pi2*dx
        else:
            a = (eta2*inner(eps(u), eps(v)) + div(v)*p + q*div(u))*dx \
                - delta*inner(grad(p), grad(q))*dx
            L = inner(f, v - delta*grad(q))*dx
        # optional non-conservative formulation with neumann BC n*grad(u) = 0
        if not conservative and cyl:
            a = (eta*inner(grad(u), grad(v))*r + eta*u[0]*v[0]/r - \
                inner(v, grad(p))*r + q*(u[0] + div(u)*r))*pi2*dx - \
                delta*inner(grad(p), grad(q))*r*pi2*dx
            L = inner(f, v - delta*grad(q))*r*pi2*dx
        if not conservative and not cyl:
            a = (eta*inner(grad(u), grad(v)) - inner(v, grad(p)) + q*div(u))*dx \
                - delta*inner(grad(p), grad(q))*dx
            L = inner(f, v - delta*grad(q))*dx
        # TODO: be able to include preconditioning form
        # p = 2*inner(sym(grad(u)), sym(grad(v)))*dx + lscale*inner(p, q)*dx
        return a, L

    def precondition(self, geo, **kwargs):
        """Attach a block preconditioning form (viscous block + scaled
        pressure mass term) to the solver method."""
        # assumes conservative, non-axisymmetric formulation
        W = self.params["V"]
        phys = self.params["phys"]
        u, p = TrialFunctions(W)
        v, q = TestFunctions(W)
        dx = geo.dx("fluid")
        grad = phys.grad
        lscale = Constant(phys.lscale)
        pscale = Constant(phys.pscale)
        # scale pressure
        p *= pscale
        q *= pscale
        eta2 = Constant(2*phys.eta)
        def eps(u): return sym(grad(u))
        P = (eta2*inner(eps(u), eps(v)) + lscale/pscale*p*q)*dx
        self.method["preconditioning_form"] = P

    def __init__(self, geo, **params):
        GeneralLinearProblem.__init__(self, geo, **params)
        # register the preconditioning form right after standard setup
        self.precondition(geo, **params)

    @staticmethod
    def bcs(V, geo):
        # no-slip velocity and pressure Dirichlet data
        return geo.pwBC(V.sub(0), "noslip") + geo.pwBC(V.sub(1), "pressure")
# --- hybrid solver ---
class PNPSHybrid(CoupledSolver):
def __init__(self, geo, phys, goals=[], iterative=False, **params):
problems = OrderedDict([
("pnp", SimplePNPProblem),
("stokes", SimpleStokesProblem),])
def couple_pnp(ustokes):
return dict(ustokes = ustokes.sub(0))
def couple_stokes(upnp, phys):
v, | |
"rowIndices: " + str(rowIndices) )
for I in rowIndices:
# I is the index of the template image
tempComplex = self.__baseImageFFT[I,:,:]
self.__templateImageFFT = nz.evaluate( "conj(tempComplex)")
tempComplex2 = self.__baseSquaredFFT[I,:,:]
self.__templateSquaredFFT = nz.evaluate( "conj(tempComplex2)")
if not self.masks.shape[0] == 1:
tempComplex = baseMaskFFT[I,:,:]
self.__templateMaskFFT = nz.evaluate( "conj(tempComplex)")
# Now we can start looping through base images
columnIndices = np.unique( np.argwhere( triIndices[I,:] ) )
#print( "columnIndices: " + str(columnIndices) )
for J in columnIndices:
####### MNXC2 revisement with private variable to make the code more manageable.
self.mnxc2( I, J, self.__shapeCropped )
#### Find maximum positions ####
self.locatePeak( I, J )
if self.verbose:
print( "# " + str(I) + "->" + str(J) + " shift: [%.2f"%self.__shiftsTriMat[I,J,0]
+ ", %.2f"%self.__shiftsTriMat[I,J,1]
+ "], cc: %.6f"%self.__corrTriMat[I,J]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,J] )
# Correlation stats is for establishing correlation scores for fixed-pattern noise.
if bool( self.trackCorrStats ):
# Track the various statistics about the correlation map, mean, std, max, skewness
self.calcCorrStats( currIndex, triIndices )
# triMode 'auto' diagonal mode
if self.triMode == u'auto' and (self.__peaksigTriMat[I,J] <= self.peaksigThres or J-I >= self.autoMax):
if self.verbose: print( "triMode 'auto' stopping at frame: " + str(J) )
break
currIndex += 1
pass # C max position location
if bool( np.any( self.fouCrop ) ):
self.__shiftsTriMat[:,:,0] *= self.shapePadded[0] / self.__shapeCropped[0]
self.__shiftsTriMat[:,:,1] *= self.shapePadded[1] / self.__shapeCropped[1]
self.bench['xcorr3'] = time.time()
# Pointer reference house-keeping
del templateMaskFFT, tempComplex, tempComplex2 # Pointer
return
def mnxc2( self, I, J, shapeCropped, refine=False ):
    """
    2-D Masked, Intensity Normalized, Cross-correlation (MNXC2).

    Correlates template frame I (whose conjugated FFTs were prepared by the
    caller in self.__templateImageFFT / __templateSquaredFFT /
    __templateMaskFFT) against base frame J. Results are written into
    private state: self.__C holds the normalized cross-correlation map
    (ifftshifted, optionally cropped to +/- self.maxShift) and
    self.__originTriMat[I,J] records the zero-shift correlation value.

    Parameters
    ----------
    I, J : int
        Template and base frame indices into the stacked FFT arrays.
    shapeCropped : array-like of 2 ints
        Shape of the (possibly Fourier-cropped) correlation map; used to
        center the maxShift crop.
    refine : bool
        Unused here; kept for call-site compatibility.
    """
    tempComplex = self.__tempComplex # Pointer re-assignment
    tempComplex2 = self.__tempComplex2 # Pointer re-assignment
    maskProduct = self.__maskProduct
    normConst2 = self.__normConst2

    # FIX: the template image FFT is needed on every path below, but it was
    # previously bound only inside the single-mask branch, so a stack of
    # per-frame masks raised a name-resolution error in nz.evaluate.
    templateImageFFT = self.__templateImageFFT # Pointer re-assignment

    if not self.masks.shape[0] == 1:
        # Per-frame masks: compute maskProduct, term is M1^* .* M2
        baseMask_block = self.__baseMaskFFT[J,:,:]; templateMaskFFT = self.__templateMaskFFT # Pointer re-assignment
        tempComplex2 = nz.evaluate( "templateMaskFFT * baseMask_block" )
        self.__IFFT2.update_arrays( tempComplex2, tempComplex ); self.__IFFT2.execute()
        # maskProduct = np.clip( np.round( np.real( tempComplex ) ), eps, np.inf )
        self.__maskProduct = nz.evaluate( "real(tempComplex)*normConst2" )
        # FIX: refresh the local alias so the Denom expressions below use the
        # newly-computed mask product instead of the stale pointer.
        maskProduct = self.__maskProduct

    # Compute mask correlation terms
    if self.masks.shape[0] == 1:
        baseMask_block = self.__baseMaskFFT # Pointer re-assignment
    self.__IFFT2.update_arrays( nz.evaluate( "baseMask_block * templateImageFFT"), tempComplex ); self.__IFFT2.execute()
    Corr_templateMask = nz.evaluate( "real(tempComplex)*normConst2" ) # Normalization

    baseImageFFT_block = self.__baseImageFFT[J,:,:]; templateMaskFFT = self.__templateMaskFFT
    self.__IFFT2.update_arrays( nz.evaluate( "templateMaskFFT * baseImageFFT_block"), tempComplex ); self.__IFFT2.execute()
    # These haven't been normalized, so let's do so. They are FFT squared, so N*N
    # This reduces the strain on single-precision range.
    Corr_baseMask = nz.evaluate( "real(tempComplex)*normConst2" ) # Normalization

    # Compute the intensity normalization for the template
    templateSquaredFFT = self.__templateSquaredFFT
    if self.masks.shape[0] == 1:
        baseMaskFFT = self.__baseMaskFFT
    else:
        # FIX: this branch referenced the undefined names 'baseMaskFFT_block'
        # and 'templateSquaredFFT'; use the per-frame mask block bound above.
        baseMaskFFT = baseMask_block
    self.__IFFT2.update_arrays( nz.evaluate( "baseMaskFFT * templateSquaredFFT"), tempComplex ); self.__IFFT2.execute()
    # DenomTemplate = nz.evaluate( "real(tempComplex)*normConst2 - real( Corr_templateMask * (Corr_templateMask / maskProduct) )" )

    # Compute the intensity normalization for the base image
    baseSquared_block = self.__baseSquaredFFT[J,:,:]
    self.__IFFT2.update_arrays( nz.evaluate( "templateMaskFFT * baseSquared_block"), tempComplex2 ); self.__IFFT2.execute()

    # Compute Denominator intensity normalization
    # DenomBase = nz.evaluate( "real(tempComplex2)*normConst2- real( Corr_baseMask * (Corr_baseMask / maskProduct) )" )
    Denom = nz.evaluate( "sqrt( (real(tempComplex2)*normConst2- real( Corr_baseMask * (Corr_baseMask / maskProduct)))" +
        "* (real(tempComplex)*normConst2 - real( Corr_templateMask * (Corr_templateMask / maskProduct)) ) )" )
    # Guard against division by very small denominators (numexpr has no clip).
    # np.inf replaces the removed-in-NumPy-2 np.Inf alias.
    Denom = np.clip( Denom, 1, np.inf )
    # print( "Number of small Denominator values: " + str(np.sum(DenomTemplate < 1.0)) )

    # Compute Numerator (the phase correlation)
    tempComplex2 = nz.evaluate( "baseImageFFT_block * templateImageFFT" )
    self.__IFFT2.update_arrays( tempComplex2, tempComplex ); self.__IFFT2.execute()
    # Numerator = nz.evaluate( "real(tempComplex)*normConst2 - real( Corr_templateMask * Corr_baseMask / maskProduct)" )

    # Compute final correlation
    self.__C = nz.evaluate( "(real(tempComplex)*normConst2 - real( Corr_templateMask * Corr_baseMask / maskProduct)) / Denom" )
    # print( "%%%% mnxc2.Denom.dtype = " + str(Denom.dtype) )

    self.__originTriMat[I,J] = self.__C[0,0]
    if bool(self.suppressOrigin):
        # If gain reference is quite old we can still get one bright pixel at the center.
        # The hot pixel filter has mitigated this but it's still a minor source of bias.
        # FIX: average the 8 distinct neighbours; the original summed
        # C[-1,0] and C[-1,1] twice and omitted C[0,-1] and C[1,-1].
        self.__C[0,0] = 0.125 * ( self.__C[1,0] + self.__C[0,1] + self.__C[-1,0] + self.__C[0,-1] +
                                  self.__C[1,1] + self.__C[-1,1] + self.__C[1,-1] + self.__C[-1,-1] )

    # We have everything in normal FFT order until here; some speed-up could be found by its removal.
    # Practically we don't have to do this fftshift, but it makes plotting easier to understand
    self.__C = np.fft.ifftshift( self.__C )

    # We can crop C if maxShift is not None and preShift is False
    if self.maxShift is not None and self.preShift is False:
        shapeCropped2 = (np.array(shapeCropped)/2.0).astype('int')
        self.__C = self.__C[shapeCropped2[0]-self.maxShift:shapeCropped2[0]+self.maxShift, shapeCropped2[1]-self.maxShift:shapeCropped2[1]+self.maxShift]

    # Pointer reference house-keeping (only names bound on every path).
    del normConst2, baseMask_block, templateMaskFFT, templateImageFFT, Corr_templateMask, baseImageFFT_block
    del Corr_baseMask, baseSquared_block, baseMaskFFT, templateSquaredFFT, maskProduct
    del tempComplex, tempComplex2
def locatePeak( self, I, J ):
"""
Subpixel peak location by Fourier interpolation.
"""
tempComplex = self.__tempComplex; tempComplex2 = self.__tempComplex2
# Apply B-factor low-pass filter to correlation function
if self.Bmode == 'opti':
self.bench['opti0'] = time.time()
# Want to define this locally so it inherits scope variables.
def inversePeakContrast( Bsigma ):
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, Bsigma )
return np.std(self.__C_filt ) / (np.max(self.__C_filt ) - np.mean(self.__C_filt ) )
# B_opti= scipy.optimize.fminbound( inversePeakContrast, 0.0, 10.0, xtol=1E-3 )
sigmaOptiMax = 7.0
sigmaOptiMin = 0.0
maxIter = 15 # Let's apply some more constraints to speed this up
tolerance = 0.01
result = scipy.optimize.minimize_scalar( inversePeakContrast,
bounds=[sigmaOptiMin,sigmaOptiMax], method="bounded",
options={'maxiter':maxIter, 'xatol':tolerance } )
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, result.x )
self.bench['opti0'] = time.time()
if self.verbose >= 2:
print( "Found optimum B-sigma: %.3f"%result.x + ", with peak sig: %.3f"%(1.0/result.fun)+" in %.1f"%(1E3*(self.bench['opti1']-self.bench['opti0']))+" ms" )
elif bool(self.Brad) and self.Bmode =='fourier':
tempComplex = self.__C.astype(fftw_dtype)
self.__FFT2.update_arrays( tempComplex, tempComplex2 ); self.__FFT2.execute()
Bfilter = self.__Bfilter
self.__IFFT2.update_arrays( nz.evaluate( "tempComplex2*Bfilter" ), tempComplex ); self.__IFFT2.execute()
# Conservation of counts with Fourier filtering is not
# very straight-forward.
C_filt = nz.evaluate( "real( tempComplex )/sqrt(normConst)" )
elif bool(self.Brad) and self.Bmode == u'conv' or self.Bmode == u'convolution':
# Convert self.Brad as an MTF to an equivalent sigma for a PSF
# TODO: Check that Bsigma is correct with Fourier cropping"
Bsigma = self.shapePadded / (np.sqrt(2) * np.pi * self.Brad)
# Scipy's gaussian filter conserves total counts
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, Bsigma )
else: # No filtering
self.__C_filt = self.__C
# Apply maximum shift max mask, if present
if bool( self.maxShift ):
# for previous frame alignment compensation, we need to shift the mask around...
C_filt = self.__C_filt
if bool( self.preShift ):
# print( "In pre-shift" )
# This isn't working with 'refine'
if self.triMode != u'refine':
rolledMask = np.roll( np.roll( self.__mask_maxShift,
np.round(self.__shiftsTriMat[I,J-1,0]).astype('int'), axis=0 ),
np.round(self.__shiftsTriMat[I,J-1,1]).astype('int'), axis=1 )
elif self.triMode == u'refine':
# With refine the matrix is populated like an autocorrelation function.
rolledMask = np.roll( np.roll( self.__mask_maxShift,
np.round(self.__shiftsTriMat[I-1,I-1,0]).astype('int'), axis=0 ),
np.round(self.__shiftsTriMat[I-1,I-1,1]).astype('int'), axis=1 )
pass
C_masked = nz.evaluate("C_filt*rolledMask")
cmaxpos = np.unravel_index( np.argmax( C_masked ), C_masked.shape )
self.__peaksigTriMat[I,J] = (C_masked[cmaxpos] - np.mean(C_filt[rolledMask]))/ np.std(C_filt[rolledMask])
else:
mask_maxShift = self.__mask_maxShift
C_masked = nz.evaluate("C_filt*mask_maxShift")
cmaxpos = np.unravel_index( np.argmax( C_masked ), C_filt.shape )
self.__peaksigTriMat[I,J] = (C_masked[cmaxpos] - np.mean(C_filt[self.__mask_maxShift]))/ np.std(C_filt[self.__mask_maxShift])
else: # No maxshift
cmaxpos = np.unravel_index( np.argmax(C_filt), C_filt.shape )
self.__peaksigTriMat[I,J] = (self.__corrTriMat[I,J] - np.mean(C_filt))/ np.std(C_filt)
if self.saveC:
# Maybe save in a pyTable if it's really needed.peaksig
if self.preShift:
self.C.append(self.__C_filt*rolledMask)
else:
self.C.append(self.__C_filt)
if self.subPixReg > 1.0: # Subpixel peak estimation by Fourier interpolation
Csub = C_filt[cmaxpos[0]-self.__subR:cmaxpos[0]+self.__subR, cmaxpos[1]-self.__subR:cmaxpos[1]+self.__subR ]
# Csub is shape [2*subR, 2*subR]
if Csub.shape[0] == 2*self.__subR and Csub.shape[1] == 2*self.__subR:
self.__subFFT2.update_arrays( Csub.astype( fftw_dtype ), self.__CsubFFT ); self.__subFFT2.execute()
# padding has to be done from the middle
# TODO: I think pad has issues with complex numbers?
#CpadFFT = np.pad( np.fft.fftshift(self.__CsubFFT), ((self.subPixReg-1)*self.__subR,), mode=b'constant', constant_values=(0.0,) )
self.__CpadFFT = np.zeros( [self.subPixReg*self.__subR*2,self.subPixReg*self.__subR*2], dtype=fftw_dtype )
# NUMPY BUG: mode has to be a byte string
self.__CpadFFT.real = np.pad( np.fft.fftshift(self.__CsubFFT.real), ((self.subPixReg-1)*self.__subR,), mode=constantPad, constant_values=(0.0,) )
self.__CpadFFT.imag = np.pad( np.fft.fftshift(self.__CsubFFT.imag), ((self.subPixReg-1)*self.__subR,), mode=constantPad, constant_values=(0.0,) )
self.__CpadFFT = np.fft.ifftshift( self.__CpadFFT )
self.__subIFFT2.update_arrays( self.__CpadFFT, self.__Csub_over ); self.__subIFFT2.execute()
# Csub_overAbs = | |
# main.py
"""Implementation of distributed training for training deep learning models from
Haddadpour, F., Kamani, M. M., Mahdavi, M., & Cadambe, V.
"Trading Redundancy for Communication: Speeding up Distributed SGD for Non-convex Optimization."
International Conference on Machine Learning. 2019.
Support single-host training with one or multiple devices.
"""
from __future__ import division
from __future__ import print_function
import cifar
import utils
import resnet_model
import argparse
import functools
import itertools
import os
import json
from collections import namedtuple
import numpy as np
import six
from six.moves import xrange
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__status__ = "Prototype"
"""
def get_model_fn(num_gpus, variable_strategy, num_workers, run_type='local'):
    """Returns a function that will build the resnet model.

    Args:
        num_gpus: number of GPUs to split towers across; 0 means one CPU tower.
        variable_strategy: 'CPU' or 'GPU' -- where parameters are placed.
        num_workers: replica count, used by SyncReplicasOptimizer in 'sync' mode.
        run_type: 'sync'/'async' selects the gradient-averaging model_fn with a
            single shared variable scope; anything else (e.g. 'local', 'multi')
            selects the per-tower-variables/per-tower-optimizer model_fn.
    """

    def _resnet_model_fn_sync(features, labels, mode, params):
        """Resnet model body.
        Support single host, one or more GPU training. Parameter distribution can
        be either one of the following scheme.
        1. CPU is the parameter server and manages gradient updates.
        2. Parameters are distributed evenly across all GPUs, and the first GPU
           manages gradient updates.
        Args:
            features: a list of tensors, one for each tower
            labels: a list of tensors, one for each tower
            mode: ModeKeys.TRAIN or EVAL
            params: Hyperparameters suitable for tuning
        Returns:
            A EstimatorSpec object.
        """
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        weight_decay = params.weight_decay
        momentum = params.momentum
        # One feature/label tensor per tower; drop any surplus shards.
        features = features[0:num_gpus]
        labels = labels[0:num_gpus]
        tower_features = features
        tower_labels = labels
        tower_losses = []
        tower_gradvars = []
        tower_preds = []
        # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
        # on CPU. The exception is Intel MKL on CPU which is optimal with
        # channels_last.
        data_format = params.data_format
        if not data_format:
            if num_gpus == 0:
                data_format = 'channels_last'
            else:
                data_format = 'channels_first'
        if num_gpus == 0:
            num_devices = 1
            device_type = 'cpu'
        else:
            num_devices = num_gpus
            device_type = 'gpu'
        for i in range(num_devices):
            worker_device = '/{}:{}'.format(device_type, i)
            # NOTE(review): device_setter stays unbound if variable_strategy is
            # neither 'CPU' nor 'GPU'; callers must pass one of the two.
            if variable_strategy == 'CPU':
                device_setter = utils.local_device_setter(
                    worker_device=worker_device)
            elif variable_strategy == 'GPU':
                device_setter = utils.local_device_setter(
                    ps_device_type='gpu',
                    worker_device=worker_device,
                    ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(
                        num_gpus, tf.contrib.training.byte_size_load_fn))
            # Single shared variable scope: every tower reuses tower 0's weights.
            with tf.variable_scope('resnet', reuse=bool(i != 0)) as var_scope:
                with tf.name_scope('tower_%d' % i) as name_scope:
                    with tf.device(device_setter):
                        loss, gradvars, preds = _tower_fn(
                            is_training, weight_decay, tower_features[i], tower_labels[i],
                            data_format, params.num_layers, params.batch_norm_decay,
                            params.batch_norm_epsilon, var_scope.name, params.dataset)
                        tower_losses.append(loss)
                        tower_gradvars.append(gradvars)
                        tower_preds.append(preds)
                        if i == 0:
                            # Only trigger batch_norm moving mean and variance update from
                            # the 1st tower. Ideally, we should grab the updates from all
                            # towers but these stats accumulate extremely fast so we can
                            # ignore the other stats from the other towers without
                            # significant detriment.
                            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                           name_scope)
        # Now compute global loss and gradients.
        gradvars = []
        with tf.name_scope('gradient_averaging'):
            all_grads = {}
            # Bucket the per-tower gradients by variable.
            for grad, var in itertools.chain(*tower_gradvars):
                if grad is not None:
                    all_grads.setdefault(var, []).append(grad)
            for var, grads in six.iteritems(all_grads):
                # Average gradients on the same device as the variables
                # to which they apply.
                with tf.device(var.device):
                    if len(grads) == 1:
                        avg_grad = grads[0]
                    else:
                        avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
                gradvars.append((avg_grad, var))
        # Device that runs the ops to apply global gradient updates.
        consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
        with tf.device(consolidation_device):
            # Suggested learning rate scheduling from
            # https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar100-resnet.py#L155
            num_images = cifar.CifarDataSet.num_examples_per_epoch('train', params.dataset)
            # NOTE(review): learning_rate_fn is unbound for any dataset other
            # than cifar10/cifar100/imagenet -- validated upstream, presumably.
            if params.dataset in ['cifar10','cifar100']:
                learning_rate_fn = utils.learning_rate_with_decay(
                    batch_size=params.train_batch_size, batch_denom=params.train_batch_size,
                    num_images=num_images, boundary_epochs=[91, 136, 182],
                    decay_rates=[1, 0.1, 0.01, 0.001])
            elif params.dataset == 'imagenet':
                learning_rate_fn = utils.learning_rate_with_decay(
                    batch_size=params.train_batch_size, batch_denom=params.train_batch_size,
                    num_images=num_images, boundary_epochs=[30, 60, 80, 90],
                    decay_rates=[1, 0.1, 0.01, 0.001, 1e-4],
                    warmup=params.warmup, base_lr=params.learning_rate)
            learning_rate = learning_rate_fn(tf.train.get_global_step())
            loss = tf.reduce_mean(tower_losses, name='loss')
            examples_sec_hook = utils.ExamplesPerSecondHook(
                params.train_batch_size, every_n_steps=10)
            tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}
            logging_hook = tf.train.LoggingTensorHook(
                tensors=tensors_to_log, every_n_iter=100)
            train_hooks = [logging_hook, examples_sec_hook]
            optimizer = tf.train.MomentumOptimizer(
                learning_rate=learning_rate, momentum=momentum)
            if params.run_type == 'sync':
                # Wrap the optimizer so gradients aggregate across replicas.
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer, replicas_to_aggregate=num_workers)
                sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
                train_hooks.append(sync_replicas_hook)
            # Create single grouped train op
            train_op = [
                optimizer.apply_gradients(
                    gradvars, global_step=tf.train.get_global_step())
            ]
            train_op.extend(update_ops)
            train_op = tf.group(*train_op)
            predictions = {
                'classes':
                    tf.concat([p['classes'] for p in tower_preds], axis=0),
                'probabilities':
                    tf.concat([p['probabilities'] for p in tower_preds], axis=0)
            }
            stacked_labels = tf.concat(labels, axis=0)
            accuracy = tf.metrics.accuracy(stacked_labels, predictions['classes'])
            metrics = {'accuracy': accuracy}
            tf.summary.scalar('accuracy', accuracy[1])
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            training_hooks=train_hooks,
            eval_metric_ops=metrics)

    def _resnet_model_fn_local(features, labels, mode, params):
        """Resnet model body for asynchoronous mode.

        Each tower gets its own variable scope ('resnet_<i>') and its own
        optimizer; parameters are periodically synchronized by utils.SyncHook
        when run_type == 'multi'.
        Args:
            features: a list of tensors, one for each tower
            labels: a list of tensors, one for each tower
            mode: ModeKeys.TRAIN or EVAL
            params: Hyperparameters suitable for tuning
        Returns:
            A EstimatorSpec object.
        """
        # Leftover from checkpoint-restore experiments (see commented block
        # below); never populated in the current code path.
        init_op = []
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        weight_decay = params.weight_decay
        momentum = params.momentum
        features = features[0:num_gpus]
        labels = labels[0:num_gpus]
        tower_features = features
        tower_labels = labels
        tower_losses = []
        tower_ops= []
        tower_preds = []
        var_scopes=[]
        # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
        # on CPU. The exception is Intel MKL on CPU which is optimal with
        # channels_last.
        data_format = params.data_format
        if not data_format:
            if num_gpus == 0:
                data_format = 'channels_last'
            else:
                data_format = 'channels_first'
        if num_gpus == 0:
            num_devices = 1
            device_type = 'cpu'
        else:
            num_devices = num_gpus
            device_type = 'gpu'
        for i in range(num_devices):
            worker_device = '/{}:{}'.format(device_type, i)
            if variable_strategy == 'CPU':
                device_setter = utils.local_device_setter(
                    worker_device=worker_device)
                # device_setter = tf.train.replica_device_setter(
                #     worker_device=worker_device)
            elif variable_strategy == 'GPU':
                device_setter = utils.local_device_setter(
                    ps_device_type='gpu',
                    worker_device=worker_device,
                    ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(
                        num_gpus, tf.contrib.training.byte_size_load_fn))
                # device_setter = tf.train.replica_device_setter(
                #     ps_device=worker_device,
                #     worker_device=worker_device
                #     )
            # Per-tower variable scope (no reuse): each tower owns its weights.
            with tf.variable_scope('resnet_{}'.format(i)) as var_scope:
                with tf.name_scope('tower_%d' % i) as name_scope:
                    with tf.device(device_setter):
                        loss, gradvars, preds = _tower_fn(
                            is_training, weight_decay, tower_features[i], tower_labels[i],
                            data_format, params.num_layers, params.batch_norm_decay,
                            params.batch_norm_epsilon, var_scope.name, params.dataset)
                        var_scopes.append(var_scope.name)
                        # if ckpt_dir:
                        #     init_op.append(tf.train.init_from_checkpoint(ckpt_dir,{'resnet_{}/'.format(i): 'resnet_{}/'.format(i)}))
                        tower_losses.append(loss)
                        # tower_gradvars.append(gradvars)
                        tower_preds.append(preds)
                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
                        # Updating parameters
                        # Suggested learning rate scheduling from
                        # https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar100-resnet.py#L155
                        num_images = cifar.CifarDataSet.num_examples_per_epoch('train', params.dataset)
                        # boundaries = [
                        #     num_batches_per_epoch * x
                        #     for x in np.array([30, 60, 80, 90], dtype=np.int64)
                        # ]
                        # staged_lr = [params.learning_rate * x for x in [1, 0.1, 0.01, 0.001, 1e-4]]
                        # learning_rate = tf.train.piecewise_constant(tf.train.get_global_step(),
                        #     boundaries, staged_lr, name='lr_tower_{}'.format(i))
                        if params.dataset in ['cifar10','cifar100']:
                            learning_rate_fn = utils.learning_rate_with_decay(
                                batch_size=params.train_batch_size, batch_denom=params.train_batch_size,
                                num_images=num_images, boundary_epochs=[91, 136, 182],
                                decay_rates=[1, 0.1, 0.01, 0.001])
                        elif params.dataset == 'imagenet':
                            learning_rate_fn = utils.learning_rate_with_decay(
                                batch_size=params.train_batch_size, batch_denom=params.train_batch_size,
                                num_images=num_images, boundary_epochs=[30, 60, 80, 90],
                                decay_rates=[1, 0.1, 0.01, 0.001, 1e-4],
                                warmup=params.warmup, base_lr=params.learning_rate)
                        learning_rate = learning_rate_fn(tf.train.get_global_step())
                        # One optimizer per tower; towers never share gradients here.
                        optimizer = tf.train.MomentumOptimizer(
                            learning_rate=learning_rate, momentum=momentum)
                        # Create single grouped train op
                        train_op = [
                            optimizer.apply_gradients(
                                gradvars, global_step=tf.train.get_global_step(), name='apply_gradient_tower_{}'.format(i))
                        ]
                        train_op.extend(update_ops)
                        # NOTE(review): tower_ops collects *lists* of ops; see
                        # tf.group(*tower_ops) below -- confirm tf.group accepts
                        # list arguments in the TF version targeted.
                        tower_ops.append(train_op)
        # Device that runs the ops to apply global gradient updates.
        consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
        with tf.device(consolidation_device):
            # Account for redundant examples when reporting throughput.
            examples_sec_hook = utils.ExamplesPerSecondHook(
                params.train_batch_size * (1 + params.redundancy), every_n_steps=10)
            loss = tf.reduce_mean(tower_losses, name='loss')
            # learning_rate here is the last tower's schedule (all are identical).
            tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}
            logging_hook = tf.train.LoggingTensorHook(
                tensors=tensors_to_log, every_n_iter=100)
            train_hooks = [ logging_hook, examples_sec_hook]
            if params.run_type == 'multi':
                # Periodically average the per-tower variable scopes.
                sync_hook = utils.SyncHook(scopes=var_scopes, every_n_steps=params.sync_step)
                train_hooks.append(sync_hook)
            train_ops = tf.group(*tower_ops)
            predictions = {
                'classes':
                    tf.concat([p['classes'] for p in tower_preds], axis=0),
                'probabilities':
                    tf.concat([p['probabilities'] for p in tower_preds], axis=0)
            }
            stacked_labels = tf.concat(labels, axis=0)
            accuracy = tf.metrics.accuracy(stacked_labels, predictions['classes'])
            metrics = {'accuracy': accuracy}
            tf.summary.scalar('accuracy', accuracy[1])
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_ops,
            training_hooks=train_hooks,
            eval_metric_ops=metrics
        )

    if run_type in ['sync', 'async']:
        return _resnet_model_fn_sync
    else:
        return _resnet_model_fn_local
def _tower_fn(is_training, weight_decay, feature, label, data_format,
              num_layers, batch_norm_decay, batch_norm_epsilon, scope, dataset_name='cifar10'):
    """Build computation tower (Resnet).
    Args:
        is_training: true if is training graph.
        weight_decay: weight regularization strength, a float.
        feature: a Tensor.
        label: a Tensor.
        data_format: channels_last (NHWC) or channels_first (NCHW).
        num_layers: number of layers, an int.
        batch_norm_decay: decay for batch normalization, a float.
        batch_norm_epsilon: epsilon for batch normalization, a float.
        scope: is the scope name that this tower is building its graph on
        dataset_name: choices between cifar10, cifar100 and imagenet
    Returns:
        A tuple with the loss for the tower, the gradients and parameters
        (as a one-shot zip iterator -- consume it only once), and predictions.
    """
    if dataset_name in ['cifar10','cifar100']:
        model = resnet_model.ResNetCifar(
            num_layers,
            batch_norm_decay=batch_norm_decay,
            batch_norm_epsilon=batch_norm_epsilon,
            is_training=is_training,
            data_format=data_format,
            dataset_name=dataset_name)
        logits = model.forward_pass(feature, input_data_format='channels_last')
    elif dataset_name == 'imagenet':
        # 1001 classes: ImageNet's 1000 plus a background class at index 0.
        model = resnet_model.ImagenetModel(num_layers,1001, data_format=data_format)
        logits = model(feature,is_training)
    # NOTE(review): 'logits' is unbound for any other dataset_name; callers
    # must validate the dataset upstream.
    # Cast back to float32 in case the model computed in reduced precision.
    logits = tf.cast(logits, tf.float32)
    tower_pred = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits)
    }
    tower_loss = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=label)
    tower_loss = tf.reduce_mean(tower_loss)
    # L2 regularization over this tower's own variables only (scoped).
    model_params = tf.trainable_variables(scope=scope)
    tower_loss += weight_decay * tf.add_n(
        [tf.nn.l2_loss(v) for v in model_params])
    tower_grad = tf.gradients(tower_loss, model_params)
    return tower_loss, zip(tower_grad, model_params), tower_pred
def input_fn(data_dir,
subset,
num_shards,
batch_size,
use_distortion_for_training=True,
redundancy=0.0,
dataset_name='cifar10'):
"""Create input graph for model.
Args:
data_dir: Directory where TFRecords representing the dataset are located.
subset: one of 'train', | |
during standardization.
context_system = copy.deepcopy(context.getSystem())
context_integrator = context.getIntegrator()
# If the temperature is controlled by the integrator, the compatibility
# is independent on the parameters of the thermostat, so we add one
# identical to self._standard_system. We don't care if the integrator's
# temperature != self.temperature, so we set check_consistency=False.
if self._is_integrator_thermostated(context_integrator, check_consistency=False):
thermostat = self._find_thermostat(self._standard_system)
context_system.addForce(copy.deepcopy(thermostat))
# Compute and compare standard system hash.
self._standardize_system(context_system)
context_system_hash = self._compute_standard_system_hash(context_system)
is_compatible = self._standard_system_hash == context_system_hash
return is_compatible
def create_context(self, integrator, platform=None, platform_properties=None):
    """Create an OpenMM Context in this ThermodynamicState.

    The context holds a copy of the system. If the integrator is coupled
    to a heat bath (i.e. it exposes getTemperature(), or is a
    CompoundIntegrator with at least one such sub-integrator), the system
    placed in the context carries no thermostat; otherwise a thermostat
    is present. A ThermodynamicsError is raised if the integrator is
    thermostated at a temperature different from this state's.

    Parameters
    ----------
    integrator : simtk.openmm.Integrator
        The integrator to use for Context creation. Its heat bath
        temperature, if any, must match the thermodynamic state.
    platform : simtk.openmm.Platform, optional
        Platform to use; OpenMM picks the fastest available when None
        (the default).
    platform_properties : dict, optional
        Platform properties; requires platform to be given.

    Returns
    -------
    context : simtk.openmm.Context
        The created OpenMM Context object.

    Raises
    ------
    ThermodynamicsError
        If the integrator temperature differs from this state's.
    ValueError
        If platform_properties is given without a platform.
    """
    # Validates the integrator temperature as a side effect; with a
    # CompoundIntegrator one thermostated sub-integrator suffices.
    is_thermostated = self._is_integrator_thermostated(integrator)
    # A heat-bath-coupled integrator makes the system thermostat redundant,
    # so strip it from the copied system in that case.
    system = self.get_system(remove_thermostat=is_thermostated)
    if platform is None:
        if platform_properties is not None:
            raise ValueError("To set platform_properties, you need to also specify the platform.")
        context_args = (system, integrator)
    elif platform_properties is None:
        context_args = (system, integrator, platform)
    else:
        context_args = (system, integrator, platform, platform_properties)
    return openmm.Context(*context_args)
def apply_to_context(self, context):
    """Apply this ThermodynamicState to the context.
    The method apply_to_context does *not* check for the compatibility
    of the context. The user is responsible for this. Depending on the
    system size, is_context_compatible can be an expensive operation,
    so is_state_compatible should be preferred when possible.
    Parameters
    ----------
    context : simtk.openmm.Context
        The OpenMM Context to be set to this ThermodynamicState.
    Raises
    ------
    ThermodynamicsError
        If the context is in a different thermodynamic ensemble w.r.t.
        this state. This is just a quick check which does not substitute
        is_state_compatible or is_context_compatible.
    See Also
    --------
    ThermodynamicState.is_state_compatible
    ThermodynamicState.is_context_compatible
    Examples
    --------
    The method doesn't verify compatibility with the context, it is
    the user's responsibility to do so, possibly with is_state_compatible
    rather than is_context_compatible which is slower.
    >>> from simtk import openmm, unit
    >>> from openmmtools import testsystems
    >>> toluene = testsystems.TolueneVacuum()
    >>> state1 = ThermodynamicState(toluene.system, 273.0*unit.kelvin)
    >>> state2 = ThermodynamicState(toluene.system, 310.0*unit.kelvin)
    >>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
    >>> context = state1.create_context(integrator)
    >>> if state2.is_state_compatible(state1):
    ...     state2.apply_to_context(context)
    >>> context.getParameter(openmm.AndersenThermostat.Temperature())
    310.0
    """
    # Push every barostat attribute (pressure, temperature, surface tension)
    # into the context, then update the thermostat temperature.
    self._set_context_barostat(context, update_pressure=True, update_temperature=True, update_surface_tension=True)
    self._set_context_thermostat(context)
# -------------------------------------------------------------------------
# Magic methods
# -------------------------------------------------------------------------
def __copy__(self):
    """Shallow copy that shares the standard system between instances.

    The standard system is treated as immutable by convention, so sharing
    it avoids duplicating a potentially very large object.
    """
    state_cls = type(self)
    duplicate = state_cls.__new__(state_cls)
    for attr_name, attr_value in self.__dict__.items():
        if attr_name != '_standard_system':
            duplicate.__dict__[attr_name] = attr_value
    duplicate.__dict__['_standard_system'] = self._standard_system
    return duplicate
def __deepcopy__(self, memo):
    """Deep copy that still shares the (never-mutated) standard system."""
    state_cls = type(self)
    duplicate = state_cls.__new__(state_cls)
    # Register before recursing so self-references resolve correctly.
    memo[id(self)] = duplicate
    duplicate.__dict__.update(
        (attr_name, copy.deepcopy(attr_value, memo))
        for attr_name, attr_value in self.__dict__.items()
        if attr_name != '_standard_system'
    )
    duplicate.__dict__['_standard_system'] = self._standard_system
    return duplicate
# Text encoding used when (de)compressing the serialized standard system,
# fixed so serializations are portable across Python installs.
_ENCODING = 'utf-8'
def __getstate__(self, skip_system=False):
    """Return a dictionary representation of the state.

    The serialized system is zlib-compressed: alchemical systems often have
    very long XML serializations, so compression saves memory and disk. The
    string is encoded (utf-8) before compression for portability between
    Python installs.

    Parameters
    ----------
    skip_system : bool, default False
        When True, ``None`` is stored under ``'standard_system'`` instead
        of computing the (expensive) serialization.
    """
    if skip_system:
        serialized_system = None
    else:
        system_xml = openmm.XmlSerializer.serialize(self._standard_system)
        serialized_system = zlib.compress(system_xml.encode(self._ENCODING))
    return dict(standard_system=serialized_system, temperature=self.temperature,
                pressure=self.pressure, surface_tension=self._surface_tension)
def __setstate__(self, serialization):
    """Restore the state from its dictionary representation."""
    self._temperature = serialization['temperature']
    self._pressure = serialization['pressure']
    self._surface_tension = serialization['surface_tension']
    serialized_system = serialization['standard_system']
    # Newer serializations are zlib-compressed; older ones stored the raw
    # XML string. Attempt decompression and keep the raw form on failure
    # (Py2 raises TypeError, Py3 raises zlib.error for uncompressed input).
    try:
        serialized_system = zlib.decompress(serialized_system)
    except (TypeError, zlib.error):
        pass
    else:
        # Py2 returns str; Py3 returns bytes which must be decoded. Decoding
        # on Py2 would produce a unicode object that OpenMM cannot parse.
        if sys.version_info > (3, 0):
            serialized_system = serialized_system.decode(self._ENCODING)
    self._standard_system_hash = hash(serialized_system)
    # Reuse an already-deserialized system from the cache when possible.
    try:
        self._standard_system = self._standard_system_cache[self._standard_system_hash]
    except KeyError:
        deserialized = openmm.XmlSerializer.deserialize(serialized_system)
        self._standard_system_cache[self._standard_system_hash] = deserialized
        self._standard_system = deserialized
# -------------------------------------------------------------------------
# Internal-usage: initialization
# -------------------------------------------------------------------------
    def _initialize(self, system, temperature=None, pressure=None, surface_tension=None):
        """Initialize the thermodynamic state.

        Any of temperature/pressure/surface tension left as None is inferred
        from the system's thermostat or barostat where possible; explicitly
        given values are written back into the (copied) system. Raises
        ThermodynamicsError if surface tension is supplied for a system whose
        barostat is not a MonteCarloMembraneBarostat, or if temperature is
        omitted and the system carries no thermostat.
        """
        # Avoid modifying the original system when setting temperature and pressure.
        system = copy.deepcopy(system)
        # If pressure is None, we try to infer the pressure from the barostat.
        barostat = self._find_barostat(system)
        if pressure is None and barostat is not None:
            self._pressure = self._get_barostat_pressure(barostat)
        else:
            self._pressure = pressure  # Pressure here can also be None.
        # If surface tension is None, we try to infer the surface tension from the barostat.
        # NOTE: when barostat is None, barostat_type is NoneType, so both
        # membrane-barostat branches below are skipped as intended.
        barostat_type = type(barostat)
        if surface_tension is None and barostat_type == openmm.MonteCarloMembraneBarostat:
            self._surface_tension = barostat.getDefaultSurfaceTension()
        elif surface_tension is not None and barostat_type != openmm.MonteCarloMembraneBarostat:
            raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
        else:
            self._surface_tension = surface_tension
        # If temperature is None, we infer the temperature from a thermostat.
        if temperature is None:
            thermostat = self._find_thermostat(system)
            if thermostat is None:
                raise ThermodynamicsError(ThermodynamicsError.NO_THERMOSTAT)
            self._temperature = thermostat.getDefaultTemperature()
        else:
            self._temperature = temperature
        # Fix system temperature/pressure if requested.
        if temperature is not None:
            self._set_system_temperature(system, temperature)
        if pressure is not None:
            self._set_system_pressure(system, pressure)
        if surface_tension is not None:
            self._set_system_surface_tension(system, surface_tension)
        # We can use the unsafe set_system since the system has been copied.
        self._unsafe_set_system(system, fix_state=False)
# -------------------------------------------------------------------------
# Internal-usage: system handling
# -------------------------------------------------------------------------
# Standard values are not standard in a physical sense, they are
# just consistent between ThermodynamicStates to make comparison
# of standard system hashes possible. We set this to round floats
# and use OpenMM units to avoid funniness due to precision errors
# caused by unit conversion.
_STANDARD_PRESSURE = 1.0*unit.bar
_STANDARD_TEMPERATURE = 273.0*unit.kelvin
_STANDARD_SURFACE_TENSION = 0.0*unit.nanometer*unit.bar
_NONPERIODIC_NONBONDED_METHODS = {openmm.NonbondedForce.NoCutoff,
openmm.NonbondedForce.CutoffNonPeriodic}
# Shared cache of standard systems to minimize memory consumption
# when simulating a lot of thermodynamic states. The cache holds
# only weak references so ThermodynamicState objects must keep the
# system as an internal variable.
_standard_system_cache = weakref.WeakValueDictionary()
def _unsafe_set_system(self, system, fix_state):
"""This implements self.set_system but modifies the passed system."""
# Configure temperature and pressure.
if fix_state:
# We just | |
<gh_stars>10-100
from moabb.datasets import BNCI2014001, Cho2017, PhysionetMI
from moabb.paradigms import MotorImagery
import numpy as np
from numpy.random import RandomState
import pickle
import time
import torch
import os
import pandas as pd
import mne
import scipy.signal as signal
import copy
from scipy.linalg import sqrtm, inv
from collections import defaultdict
# Work around the "duplicate OpenMP runtime" abort that can occur when
# multiple libraries (e.g. torch + MKL) each load libiomp.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
cuda = torch.cuda.is_available()
print('gpu: ', cuda)
device = 'cuda' if cuda else 'cpu'
# Fix every RNG (torch CPU/CUDA, numpy) and force deterministic cuDNN
# kernels so runs are reproducible.
seed = 42
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
rng = RandomState(seed)
def print_info(source_data, dataset_name):
    """Log the shape and value range of every subject array in a dataset."""
    print("current dataset {}".format(dataset_name))
    for subject_idx, subject_array in enumerate(source_data):
        message = "source_data subject_idx {} has shape : {}, with range scale ({},{}) "
        print(message.format(subject_idx, subject_array.shape,
                             np.max(subject_array), np.min(subject_array)))
def print_dataset_info(data, dataset_name="train dataset A"):
    """Log the shape and value range of a single dataset array."""
    print(dataset_name)
    summary = "Train subject has shape : {}, with range scale ({},{}) "
    print(summary.format(data.shape, np.max(data), np.min(data)))
class filterBank(object):
    """
    Filter the given signal in the specific bands using cheby2 iir filtering.

    If only one filter is specified then it acts as a simple filter (the
    trailing filter dimension is squeezed away); otherwise the output gains
    an extra last dimension with the filtered signals stacked along it.
    ``axis`` is the time dimension along which the filtering will be applied.
    """

    def __init__(self, filtBank, fs, filtAllowance=2, axis=-1, filtType='filter'):
        # filtBank: list of [low, high] cut-off pairs in Hz; 0/None makes a
        # band one-sided (low-pass / high-pass).
        self.filtBank = filtBank
        self.fs = fs                        # sampling frequency (Hz)
        self.filtAllowance = filtAllowance  # transition bandwidth (Hz)
        self.axis = axis                    # time axis to filter along
        self.filtType = filtType            # 'filtfilt' (zero-phase) or 'filter' (causal)

    def bandpassFilter(self, data, bandFiltCutF, fs, filtAllowance=2, axis=-1, filtType='filter'):
        """
        Filter a signal using cheby2 iir filtering.

        Parameters
        ----------
        data: 2d/ 3d np array
            trial x channels x time
        bandFiltCutF: two element list containing the low and high cut off frequency in hertz.
            if any value is specified as None then only one sided filtering will be performed
        fs: sampling frequency
        filtAllowance: transition bandwidth in hertz
        filtType: string, available options are 'filtfilt' and 'filter'

        Returns
        -------
        dataOut: 2d/ 3d np array after filtering
            Data after applying bandpass filter.
        """
        aStop = 30      # stopband attenuation (dB)
        aPass = 3       # passband ripple (dB)
        nFreq = fs / 2  # Nyquist frequency

        low, high = bandFiltCutF
        if (low == 0 or low is None) and (high is None or high >= fs / 2.0):
            # Neither edge constrains the signal: nothing to do.
            print("Not doing any filtering. Invalid cut-off specifications")
            return data
        elif low == 0 or low is None:
            # Low-pass filter only.
            print("Using lowpass filter since low cut hz is 0 or None")
            fPass = high / nFreq
            fStop = (high + filtAllowance) / nFreq
            # Find the minimal filter order meeting the attenuation spec.
            N, ws = signal.cheb2ord(fPass, fStop, aPass, aStop)
            b, a = signal.cheby2(N, aStop, fStop, 'lowpass')
        elif high is None or high == fs / 2.0:
            # High-pass filter only.
            print("Using highpass filter since high cut hz is None or nyquist freq")
            fPass = low / nFreq
            fStop = (low - filtAllowance) / nFreq
            N, ws = signal.cheb2ord(fPass, fStop, aPass, aStop)
            b, a = signal.cheby2(N, aStop, fStop, 'highpass')
        else:
            # Band-pass filter.
            fPass = (np.array(bandFiltCutF) / nFreq).tolist()
            fStop = [(low - filtAllowance) / nFreq, (high + filtAllowance) / nFreq]
            N, ws = signal.cheb2ord(fPass, fStop, aPass, aStop)
            b, a = signal.cheby2(N, aStop, fStop, 'bandpass')

        if filtType == 'filtfilt':
            return signal.filtfilt(b, a, data, axis=axis)
        return signal.lfilter(b, a, data, axis=axis)

    def __call__(self, data1):
        """Apply every band in the bank to ``data1`` (trials, channels, samples).

        Returns (trials, channels, samples, n_bands), squeezed to 3d when the
        bank holds a single band. The input is deep-copied, never mutated.
        """
        data = copy.deepcopy(data1)
        d = data
        # One filtered copy of the data per band, stacked along the last axis.
        out = np.zeros([*d.shape, len(self.filtBank)])
        for i, filtBand in enumerate(self.filtBank):
            filtered = self.bandpassFilter(d, filtBand, self.fs, self.filtAllowance,
                                           self.axis, self.filtType)
            out[:, :, :, i] = filtered
        # Remove any redundant filter dimension.
        # BUGFIX: squeeze the *last* axis (the filter axis). The original
        # squeezed axis=2, which for the 3d input required by the assignment
        # above is the time axis and raised ValueError whenever the bank
        # contained a single band.
        if len(self.filtBank) <= 1:
            out = np.squeeze(out, axis=-1)
        return out
class LabelAlignment:
    """
    Label Alignment technique
    https://arxiv.org/pdf/1912.01166.pdf

    Aligns the per-class covariance statistics of source subjects to those of
    a target dataset, so source trials can be reused for the target domain.
    """
    def __init__(self,target_dataset):
        """
        assume target_data is (trials,channels,samples)
        target_label is (trials)
        """
        self.target_data,self.target_label = target_dataset
        # Square-root (NOT inverted) class covariances of the target; these
        # are the destination statistics used when aligning sources below.
        self.target_r_op = self.generate_class_cov(self.target_data,self.target_label,invert=False)
        # for k,v in self.target_r_op.items():
        #     print("target label {} has r_op : {}".format(k,v))
    def convert_source_data_with_LA(self, source_data,source_label):
        """
        Align every source subject's trials to the target class statistics.

        Args:
            source_data: (n_subject,(trials,channels,samples))
            source_label: (n_subject,(trials))
        Returns:
            (aligned_source_data, source_label).
            NOTE(review): if a source label is missing from the target set
            this returns None implicitly after printing — callers unpacking
            the result will then fail; confirm this is intended.
        """
        new_source_data = list()
        for subject in range(len(source_data)):
            subject_data = source_data[subject]
            subject_label = source_label[subject]
            category_A_m = dict()
            new_subject_data = list()
            # Per-class inverse square-root covariance of this source subject.
            subject_category_r_op = self.generate_class_cov(subject_data,subject_label,invert=True)
            for label in sorted(list(subject_category_r_op.keys())):
                if label not in list(self.target_r_op.keys()):
                    print("current label {} is not in target dataset ".format(label))
                    return
                source_r_op = subject_category_r_op[label]
                target_r_op = self.target_r_op[label]
                # A_m maps source class statistics onto the target's:
                # sqrt(cov_target) @ inv(sqrt(cov_source)).
                A_m = np.matmul(target_r_op, source_r_op)
                category_A_m[label] = A_m
            # Apply the class-specific transform to every trial.
            for trial in range(len(subject_data)):
                trial_data = subject_data[trial]
                trial_label = subject_label[trial]
                trial_A_m = category_A_m[trial_label]
                convert_trial_data = np.matmul(trial_A_m, trial_data)
                new_subject_data.append(convert_trial_data)
            new_subject_data = np.array(new_subject_data)
            new_source_data.append(new_subject_data)
        return new_source_data,source_label
    def generate_class_cov(self,target_data,target_label,invert=True):
        """
        Use the target data to generate an inverse Covariance for each class category.

        Args:
            target_data: (trials,channels,samples)
            target_label: (trials)
            invert: if True return inv(sqrtm(cov)), else sqrtm(cov).
        Returns:
            dict mapping class label -> (channels,channels) matrix.
        """
        category_data = defaultdict(list)
        category_r_op = dict()
        # Group trials by class label.
        for data,label in zip(target_data,target_label):
            # print("current label : ",label)
            category_data[label].append(data)
        for label,data in category_data.items():
            data= np.array(data)
            # print("data shape : ",data.shape)
            if invert:
                # print("calculate inv sqrt cov")
                r_op = self.calculate_inv_sqrt_cov(data)
            else:
                # print("calculate sqrt cov")
                r_op = self.calcualte_sqrt_cov(data)
            category_r_op[label] = r_op
        return category_r_op
    def calculate_inv_sqrt_cov(self,data):
        # Expects (trials, channels, samples); returns inv(sqrtm(mean cov)).
        assert len(data.shape) == 3
        # r = np.matmul(data, data.transpose((0, 2, 1))).mean(0)
        #calculate covariance matrix of each trial
        r = 0
        for trial in data:
            cov = np.cov(trial, rowvar=True)
            r += cov
        # Average the per-trial channel covariances.
        r = r/data.shape[0]
        # print("origin cov : ", r)
        if np.iscomplexobj(r):
            print("covariance matrix problem")
        if np.iscomplexobj(sqrtm(r)):
            print("covariance matrix problem sqrt")
        r_op = inv(sqrtm(r))
        if np.iscomplexobj(r_op):
            print("WARNING! Covariance matrix was not SPD somehow. Can be caused by running ICA-EOG rejection, if "
                  "not, check data!!")
            # print("r op : ",r_op)
            # Keep only the real part when the matrix was not SPD.
            r_op = np.real(r_op).astype(np.float32)
        elif not np.any(np.isfinite(r_op)):
            # NOTE(review): this only triggers when ALL entries are
            # non-finite; `not np.all(...)` may have been intended — confirm.
            print("WARNING! Not finite values in R Matrix")
        return r_op
    def calcualte_sqrt_cov(self,data):
        # (sic) method name typo preserved — callers use this spelling.
        # Expects (trials, channels, samples); returns sqrtm(mean cov).
        assert len(data.shape) == 3
        # r = np.matmul(data, data.transpose((0, 2, 1))).mean(0)
        #calculate covariance matrix of each trial
        r = 0
        for trial in data:
            cov = np.cov(trial, rowvar=True)
            r += cov
        r = r/data.shape[0]
        if np.iscomplexobj(r):
            print("covariance matrix problem")
        if np.iscomplexobj(sqrtm(r)):
            print("covariance matrix problem sqrt")
        r_op = sqrtm(r)
        return r_op
def expand_data_dim(data):
    """Ensure trials carry a singleton filter axis at position 1.

    3d ndarrays (trials, channels, samples) gain a new axis; 4d arrays pass
    through untouched. Lists are updated element-wise in place and returned.
    """
    if isinstance(data, list):
        for idx, subject_array in enumerate(data):
            if subject_array.ndim == 3:
                data[idx] = np.expand_dims(subject_array, axis=1)
        return data
    if isinstance(data, np.ndarray):
        return np.expand_dims(data, axis=1) if data.ndim == 3 else data
    raise ValueError("the data format during the process section is not correct")
def normalization_channels(X):
    """Z-score normalise ``X`` across the channel axis.

    Parameters
    ----------
    X : np.ndarray
        Either (trials, channels, samples) or
        (trials, filters, channels, samples); the channel axis is inferred
        from the rank.

    Returns
    -------
    np.ndarray
        Same shape as ``X``, zero mean / unit std along the channel axis.

    Raises
    ------
    ValueError
        If ``X`` is neither 3d nor 4d.
    """
    if len(X.shape) == 3:
        # (trials, channels, samples)
        axis = 1
    elif len(X.shape) == 4:
        # (trials, filters, channels, samples)
        axis = 2
    else:
        # Removed the dead `axis = -1` assignment that preceded this raise.
        raise ValueError("there is problem with data format")
    mean = np.mean(X, axis=axis, keepdims=True)
    std = np.std(X, axis=axis, keepdims=True)
    # NOTE(review): a zero-variance slice divides by zero here — unchanged
    # from the original behaviour.
    X = (X - mean) / std
    return X
def normalization_time(X):
    """Z-score normalise ``X`` across the time (samples) axis.

    Parameters
    ----------
    X : np.ndarray
        Either (trials, channels, samples) or
        (trials, filters, channels, samples); the time axis is inferred from
        the rank.

    Returns
    -------
    np.ndarray
        Same shape as ``X``, zero mean / unit std along the time axis.

    Raises
    ------
    ValueError
        If ``X`` is neither 3d nor 4d.
    """
    if len(X.shape) == 3:
        # (trials, channels, samples)
        axis = 2
    elif len(X.shape) == 4:
        # (trials, filters, channels, samples)
        axis = 3
    else:
        # Removed the dead `axis = -1` assignment that preceded this raise.
        raise ValueError("there is problem with data format")
    mean = np.mean(X, axis=axis, keepdims=True)
    std = np.std(X, axis=axis, keepdims=True)
    # NOTE(review): a zero-variance slice divides by zero here — unchanged
    # from the original behaviour.
    X = (X - mean) / std
    return X
def shuffle_data(subject_data, subject_label):
    """Shuffle trials and labels together with one shared permutation.

    Uses the *global* numpy RNG, so results depend on np.random.seed().
    """
    trial_indices = np.arange(subject_data.shape[0])
    print("avail index : ", trial_indices)
    permuted = np.random.permutation(trial_indices)
    print("shuffle index : ", permuted)
    return [subject_data[permuted,], subject_label[permuted,]]
# def shuffle_data(subject_data,subject_label):
# return [subject_data,subject_label]
def modify_data(data, time=256):
    """Crop each trial to its first ``time`` samples along axis 2."""
    cropped = data[:, :, 0:time]
    return cropped
def get_dataset_A_ch():
    """Return the 63 EEG channel names of dataset A, in montage order."""
    names = (
        "Fp1 Fz F3 F7 FT9 FC5 FC1 C3 T7 "
        "TP9 CP5 CP1 Pz P3 P7 O1 Oz "
        "O2 P4 P8 TP10 CP6 CP2 C4 T8 "
        "FT10 FC6 FC2 F4 F8 Fp2 AF7 AF3 "
        "AFz F1 F5 FT7 FC3 FCz C1 C5 "
        "TP7 CP3 P1 P5 PO7 PO3 POz PO4 "
        "PO8 P6 P2 CPz CP4 TP8 C6 C2 "
        "FC4 FT8 F6 F2 AF4 AF8"
    )
    return names.split()
def get_dataset_B_ch():
    """Return the 32 EEG channel names of dataset B, in montage order."""
    names = (
        "Fp1 Fp2 F3 "
        "Fz F4 FC5 FC1 FC2 FC6 C5 C3 "
        "C1 Cz C2 C4 C6 CP5 CP3 CP1 "
        "CPz CP2 CP4 CP6 P7 P5 P3 P1 Pz "
        "P2 P4 P6 P8"
    )
    return names.split()
def generate_common_chan_test_data(ch_names_A=None, ch_names_B=None):
    """Return the channels present in both montages, in ``ch_names_A`` order.

    Falls back to the module-level dataset A/B channel lists when an argument
    is omitted or empty. Assumes channel names are unique within each list
    (true for the EEG montages used here). Replaces the original O(n*m)
    nested scan with an O(n+m) set lookup.
    """
    if not ch_names_B:
        ch_names_B = get_dataset_B_ch()
    if not ch_names_A:
        ch_names_A = get_dataset_A_ch()
    names_in_B = set(ch_names_B)
    return [chan for chan in ch_names_A if chan in names_in_B]
def correct_EEG_data(data,channels,correct_chans_order):
new_eeg_data = np.zeros((data.shape[0],len(correct_chans_order),data.shape[2]))
print("current BCI | |
class, gets or sets the position within the current stream.
Get: Position(self: Stream) -> Int64
Set: Position(self: Stream) = value
"""
ReadTimeout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets a value, in miliseconds, that determines how long the stream will attempt to read before timing out.
Get: ReadTimeout(self: Stream) -> int
Set: ReadTimeout(self: Stream) = value
"""
WriteTimeout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets a value, in miliseconds, that determines how long the stream will attempt to write before timing out.
Get: WriteTimeout(self: Stream) -> int
Set: WriteTimeout(self: Stream) = value
"""
Null = None
class BufferedStream(Stream, IDisposable):
    """
    Adds a buffering layer to read and write operations on another stream. This class cannot be inherited.

    BufferedStream(stream: Stream)
    BufferedStream(stream: Stream, bufferSize: int)
    """
    # NOTE: auto-generated IronPython stub for System.IO.BufferedStream.
    # Every method body below is a placeholder ('pass'); the CLR supplies the
    # real implementation at runtime. Do not add logic here.
    def BeginRead(self, buffer, offset, count, callback, state):
        """ BeginRead(self: BufferedStream, buffer: Array[Byte], offset: int, count: int, callback: AsyncCallback, state: object) -> IAsyncResult """
        pass
    def BeginWrite(self, buffer, offset, count, callback, state):
        """ BeginWrite(self: BufferedStream, buffer: Array[Byte], offset: int, count: int, callback: AsyncCallback, state: object) -> IAsyncResult """
        pass
    def CreateWaitHandle(self, *args): #cannot find CLR method
        """
        CreateWaitHandle(self: Stream) -> WaitHandle
        Allocates a System.Threading.WaitHandle object.
        Returns: A reference to the allocated WaitHandle.
        """
        pass
    def Dispose(self):
        """ Dispose(self: BufferedStream, disposing: bool) """
        pass
    def EndRead(self, asyncResult):
        """ EndRead(self: BufferedStream, asyncResult: IAsyncResult) -> int """
        pass
    def EndWrite(self, asyncResult):
        """ EndWrite(self: BufferedStream, asyncResult: IAsyncResult) """
        pass
    def Flush(self):
        """
        Flush(self: BufferedStream)
        Clears all buffers for this stream and causes any buffered data to be written
        to the underlying device.
        """
        pass
    def FlushAsync(self, cancellationToken=None):
        """ FlushAsync(self: BufferedStream, cancellationToken: CancellationToken) -> Task """
        pass
    def MemberwiseClone(self, *args): #cannot find CLR method
        """
        MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
        Creates a shallow copy of the current System.MarshalByRefObject object.
        cloneIdentity: false to delete the current System.MarshalByRefObject object's identity, which
        will cause the object to be assigned a new identity when it is marshaled across
        a remoting boundary. A value of false is usually appropriate. true to copy the
        current System.MarshalByRefObject object's identity to its clone, which will
        cause remoting client calls to be routed to the remote server object.
        Returns: A shallow copy of the current System.MarshalByRefObject object.
        MemberwiseClone(self: object) -> object
        Creates a shallow copy of the current System.Object.
        Returns: A shallow copy of the current System.Object.
        """
        pass
    def ObjectInvariant(self, *args): #cannot find CLR method
        """
        ObjectInvariant(self: Stream)
        Provides support for a System.Diagnostics.Contracts.Contract.
        """
        pass
    def Read(self, array, offset, count):
        """
        Read(self: BufferedStream, offset: int, count: int) -> (int, Array[Byte])
        Copies bytes from the current buffered stream to an array.
        offset: The byte offset in the buffer at which to begin reading bytes.
        count: The number of bytes to be read.
        Returns: The total number of bytes read into array. This can be less than the number of
        bytes requested if that many bytes are not currently available, or 0 if the end
        of the stream has been reached before any data can be read.
        """
        pass
    def ReadAsync(self, buffer, offset, count, cancellationToken=None):
        """ ReadAsync(self: BufferedStream, buffer: Array[Byte], offset: int, count: int, cancellationToken: CancellationToken) -> Task[int] """
        pass
    def ReadByte(self):
        """
        ReadByte(self: BufferedStream) -> int
        Reads a byte from the underlying stream and returns the byte cast to an int, or
        returns -1 if reading from the end of the stream.
        Returns: The byte cast to an int, or -1 if reading from the end of the stream.
        """
        pass
    def Seek(self, offset, origin):
        """
        Seek(self: BufferedStream, offset: Int64, origin: SeekOrigin) -> Int64
        Sets the position within the current buffered stream.
        offset: A byte offset relative to origin.
        origin: A value of type System.IO.SeekOrigin indicating the reference point from which
        to obtain the new position.
        Returns: The new position within the current buffered stream.
        """
        pass
    def SetLength(self, value):
        """
        SetLength(self: BufferedStream, value: Int64)
        Sets the length of the buffered stream.
        value: An integer indicating the desired length of the current buffered stream in
        bytes.
        """
        pass
    def Write(self, array, offset, count):
        """
        Write(self: BufferedStream, array: Array[Byte], offset: int, count: int)
        Copies bytes to the buffered stream and advances the current position within
        the buffered stream by the number of bytes written.
        array: The byte array from which to copy count bytes to the current buffered stream.
        offset: The offset in the buffer at which to begin copying bytes to the current
        buffered stream.
        count: The number of bytes to be written to the current buffered stream.
        """
        pass
    def WriteAsync(self, buffer, offset, count, cancellationToken=None):
        """ WriteAsync(self: BufferedStream, buffer: Array[Byte], offset: int, count: int, cancellationToken: CancellationToken) -> Task """
        pass
    def WriteByte(self, value):
        """
        WriteByte(self: BufferedStream, value: Byte)
        Writes a byte to the current position in the buffered stream.
        value: A byte to write to the stream.
        """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, stream, bufferSize=None):
        """
        __new__(cls: type, stream: Stream)
        __new__(cls: type, stream: Stream, bufferSize: int)
        """
        pass
    # Stub properties: placeholder lambdas, real accessors provided by the CLR.
    CanRead = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value indicating whether the current stream supports reading.
    Get: CanRead(self: BufferedStream) -> bool
    """
    CanSeek = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value indicating whether the current stream supports seeking.
    Get: CanSeek(self: BufferedStream) -> bool
    """
    CanWrite = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value indicating whether the current stream supports writing.
    Get: CanWrite(self: BufferedStream) -> bool
    """
    Length = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the stream length in bytes.
    Get: Length(self: BufferedStream) -> Int64
    """
    Position = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the position within the current stream.
    Get: Position(self: BufferedStream) -> Int64
    Set: Position(self: BufferedStream) = value
    """
class Directory(object):
""" Exposes static methods for creating, moving, and enumerating through directories and subdirectories. This class cannot be inherited. """
    @staticmethod
    def CreateDirectory(path, directorySecurity=None):
        """
        CreateDirectory(path: str, directorySecurity: DirectorySecurity) -> DirectoryInfo
        Creates all the directories in the specified path, applying the specified
        Windows security.
        path: The directory to create.
        directorySecurity: The access control to apply to the directory.
        Returns: A System.IO.DirectoryInfo object representing the newly created directory.
        CreateDirectory(path: str) -> DirectoryInfo
        Creates all directories and subdirectories in the specified path.
        path: The directory path to create.
        Returns: A System.IO.DirectoryInfo as specified by path.
        """
        # Auto-generated stub; the CLR provides the real implementation.
        pass
    @staticmethod
    def Delete(path, recursive=None):
        """
        Delete(path: str, recursive: bool)
        Deletes the specified directory and, if indicated, any subdirectories and files
        in the directory.
        path: The name of the directory to remove.
        recursive: true to remove directories, subdirectories, and files in path; otherwise, false.
        Delete(path: str)
        Deletes an empty directory from a specified path.
        path: The name of the empty directory to remove. This directory must be writable or
        empty.
        """
        # Auto-generated stub; the CLR provides the real implementation.
        pass
@staticmethod
def EnumerateDirectories(path, searchPattern=None, searchOption=None):
"""
EnumerateDirectories(path: str, searchPattern: str, searchOption: SearchOption) -> IEnumerable[str]
Returns an enumerable collection of directory names that match a search pattern
in a specified path, and optionally searches subdirectories.
path: The directory to search.
searchPattern: The search string to match against the names of directories in path.
searchOption: One of the values of the System.IO.SearchOption | |
<reponame>tobias-liaudat/wf-psf<filename>jz-submissions/scripts/train_eval_poly_200.py
#!/usr/bin/env python
# coding: utf-8
# PSF modelling and evaluation
from absl import app
from absl import flags
import sys
import numpy as np
import time
import wf_psf as wf
import tensorflow as tf
# import tensorflow as tf
import tensorflow_addons as tfa
## Training flags
# Model definition
flags.DEFINE_string("model", "poly", "Model type. Options are: 'mccd', 'poly', 'param'.")
flags.DEFINE_string("id_name", "_test-coherent_euclid_200stars", "Model saving id.")
# Saving paths
flags.DEFINE_string("base_path", "/gpfswork/rech/xdy/ulx23va/wf-outputs/", "Base path for saving files.")
flags.DEFINE_string("log_folder", "log-files/", "Folder name to save log files.")
flags.DEFINE_string("model_folder", "chkp/", "Folder name to save trained models.")
flags.DEFINE_string("optim_hist_folder", "optim-hist/", "Folder name to save optimisation history files.")
flags.DEFINE_string("chkp_save_path", "/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/", "Path to save model checkpoints during training.")
# Input dataset paths
flags.DEFINE_string("dataset_folder", "/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/", "Folder path of datasets.")
flags.DEFINE_string("train_dataset_file", "train_Euclid_res_200_TrainStars_id_001.npy", "Train dataset file name.")
flags.DEFINE_string("test_dataset_file", "test_Euclid_res_id_001.npy", "Test dataset file name.")
# Model parameters
flags.DEFINE_integer("n_zernikes", 15, "Zernike polynomial modes to use on the parametric part.")
flags.DEFINE_integer("pupil_diameter", 256, "Dimension of the OPD/Wavefront space.")
flags.DEFINE_integer("n_bins_lda", 20, "Number of wavelength bins to use to reconstruct polychromatic objects.")
flags.DEFINE_float("output_Q", 3., "Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model.")
flags.DEFINE_float("oversampling_rate", 3., "Oversampling rate used for the OPD/WFE PSF model.")
flags.DEFINE_integer("output_dim", 32, "Dimension of the pixel PSF postage stamp.")
flags.DEFINE_integer("d_max", 2, "Max polynomial degree of the parametric part.")
flags.DEFINE_integer("d_max_nonparam", 3, "Max polynomial degree of the non-parametric part.")
flags.DEFINE_list("x_lims", [0, 1e3], "Limits of the PSF field coordinates for the x axis.")
flags.DEFINE_list("y_lims", [0, 1e3], "Limits of the PSF field coordinates for the y axis.")
flags.DEFINE_integer("graph_features", 10, "Number of graph-constrained features of the non-parametric part.")
flags.DEFINE_float("l1_rate", 1e-8, "L1 regularisation parameter for the non-parametric part.")
# Training parameters
# Typos fixed in help text only ("trainingin" -> "training in",
# "descend" -> "descent"); flag names and defaults are unchanged.
flags.DEFINE_integer("batch_size", 32, "Batch size used for the training in the stochastic gradient descent type of algorithm.")
flags.DEFINE_list("l_rate_param", [1e-2, 1e-2], "Learning rates for the parametric parts.")
flags.DEFINE_list("l_rate_non_param", [1e-1, 1e-1], "Learning rates for the non-parametric parts.")
flags.DEFINE_list("n_epochs_param", [2, 2], "Number of training epochs of the parametric parts.")
flags.DEFINE_list("n_epochs_non_param", [2, 2], "Number of training epochs of the non-parametric parts.")
flags.DEFINE_integer("total_cycles", 2, "Total amount of cycles to perform. For the moment the only available options are '1' or '2'.")
## Evaluation flags
# Saving paths
flags.DEFINE_string("metric_base_path", "/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/", "Base path for saving metric files.")
flags.DEFINE_string("saved_model_type", "final", "Type of saved model to use for the evaluation. Can be 'final' or 'checkpoint'.")
flags.DEFINE_string("saved_cycle", "cycle2", "Saved cycle to use for the evaluation. Can be 'cycle1' or 'cycle2'.")
# Evaluation parameters
flags.DEFINE_integer("GT_n_zernikes", 45, "Zernike polynomial modes to use on the ground truth model parametric part.")
flags.DEFINE_integer("eval_batch_size", 16, "Batch size to use for the evaluation.")
# Define flags
FLAGS = flags.FLAGS
def train_model():
    """ Train the model defined in the flags.

    Reads all configuration from the absl FLAGS object, trains the selected
    PSF field model ('mccd', 'poly' or 'param') for one or two optimisation
    cycles, and writes Keras checkpoints, final weights, the optimisation
    history (.npy) and a run log to the flag-configured paths.
    """
    # Start measuring elapsed time
    starting_time = time.time()
    # Define model run id
    run_id_name = FLAGS.model + FLAGS.id_name
    # Define paths
    log_save_file = FLAGS.base_path + FLAGS.log_folder
    model_save_file= FLAGS.base_path + FLAGS.model_folder
    optim_hist_file = FLAGS.base_path + FLAGS.optim_hist_folder
    saving_optim_hist = dict()
    # Save output prints to logfile
    # NOTE(review): sys.stdout stays redirected until the end of the function;
    # an exception mid-run would leave it pointing at the (unclosed) log file.
    old_stdout = sys.stdout
    log_file = open(log_save_file + run_id_name + '_output.log','w')
    sys.stdout = log_file
    print('Starting the log file.')
    # Print GPU and tensorflow info
    device_name = tf.test.gpu_device_name()
    print('Found GPU at: {}'.format(device_name))
    print('tf_version: ' + str(tf.__version__))
    ## Prepare the inputs
    # Generate Zernike maps
    zernikes = wf.utils.zernike_generator(n_zernikes=FLAGS.n_zernikes, wfe_dim=FLAGS.pupil_diameter)
    # Now as cubes: stack the per-mode maps into one (n_zernikes, H, W) array
    np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
    for it in range(len(zernikes)):
        np_zernike_cube[it,:,:] = zernikes[it]
    # Zero out NaNs before the tensor conversion
    np_zernike_cube[np.isnan(np_zernike_cube)] = 0
    tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
    print('Zernike cube:')
    print(tf_zernike_cube.shape)
    ## Load the dictionaries
    # The dataset files are pickled dicts stored in .npy files, hence
    # allow_pickle=True and the [()] indexing to unwrap the 0-d object array.
    train_dataset = np.load(FLAGS.dataset_folder + FLAGS.train_dataset_file, allow_pickle=True)[()]
    # train_stars = train_dataset['stars']
    # noisy_train_stars = train_dataset['noisy_stars']
    # train_pos = train_dataset['positions']
    train_SEDs = train_dataset['SEDs']
    # train_zernike_coef = train_dataset['zernike_coef']
    train_C_poly = train_dataset['C_poly']  # loaded but not used in this function
    train_parameters = train_dataset['parameters']
    test_dataset = np.load(FLAGS.dataset_folder + FLAGS.test_dataset_file, allow_pickle=True)[()]
    # test_stars = test_dataset['stars']
    # test_pos = test_dataset['positions']
    test_SEDs = test_dataset['SEDs']
    # test_zernike_coef = test_dataset['zernike_coef']
    # Convert to tensor
    tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
    tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
    tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
    tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
    tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
    print('Dataset parameters:')
    print(train_parameters)
    ## Generate initializations
    # Prepare np input: a PSF simulator used to build obscurations and to pack SEDs
    simPSF_np = wf.SimPSFToolkit(zernikes, max_order=FLAGS.n_zernikes,
                                pupil_diameter=FLAGS.pupil_diameter, output_dim=FLAGS.output_dim,
                                oversampling_rate=FLAGS.oversampling_rate, output_Q=FLAGS.output_Q)
    simPSF_np.gen_random_Z_coeffs(max_order=FLAGS.n_zernikes)
    z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
    simPSF_np.set_z_coeffs(z_coeffs)
    simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
    # Obscurations
    obscurations = simPSF_np.generate_pupil_obscurations(N_pix=FLAGS.pupil_diameter, N_filter=2)
    tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
    # Initialize the SED data list (one packed element per training star)
    packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=FLAGS.n_bins_lda)
                    for _sed in train_SEDs]
    # Prepare the inputs for the training
    tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
    tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
    inputs = [tf_train_pos, tf_packed_SED_data]
    # Select the observed stars (noisy or noiseless)
    outputs = tf_noisy_train_stars
    # outputs = tf_train_stars
    ## Prepare validation data inputs
    # The test split doubles as the validation set here.
    val_SEDs = test_SEDs
    tf_val_pos = tf_test_pos
    tf_val_stars = tf_test_stars
    # Initialize the SED data list
    val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=FLAGS.n_bins_lda)
                        for _sed in val_SEDs]
    # Prepare the inputs for the validation
    tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
    tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
    # Prepare input validation tuple
    val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
    val_y_inputs = tf_val_stars
    val_data = (val_x_inputs, val_y_inputs)
    ## Select the model ('mccd', 'poly' or 'param'); any other value leaves
    ## tf_semiparam_field undefined and the training below raises NameError.
    if FLAGS.model == 'mccd':
        poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
                                                                        obs_pos=tf_train_pos.numpy(),
                                                                        x_lims=FLAGS.x_lims,
                                                                        y_lims=FLAGS.y_lims,
                                                                        d_max=FLAGS.d_max_nonparam,
                                                                        graph_features=FLAGS.graph_features)
        spatial_dic = [poly_dic, graph_dic]
        # Initialize the model
        tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
                                                                obscurations=tf_obscurations,
                                                                batch_size=FLAGS.batch_size,
                                                                obs_pos=tf_train_pos,
                                                                spatial_dic=spatial_dic,
                                                                output_Q=FLAGS.output_Q,
                                                                d_max_nonparam=FLAGS.d_max_nonparam,
                                                                graph_features=FLAGS.graph_features,
                                                                l1_rate=FLAGS.l1_rate,
                                                                output_dim=FLAGS.output_dim,
                                                                n_zernikes=FLAGS.n_zernikes,
                                                                d_max=FLAGS.d_max,
                                                                x_lims=FLAGS.x_lims,
                                                                y_lims=FLAGS.y_lims)
    elif FLAGS.model == 'poly':
        # # Initialize the model
        tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
                                                            obscurations=tf_obscurations,
                                                            batch_size=FLAGS.batch_size,
                                                            output_Q=FLAGS.output_Q,
                                                            d_max_nonparam=FLAGS.d_max_nonparam,
                                                            output_dim=FLAGS.output_dim,
                                                            n_zernikes=FLAGS.n_zernikes,
                                                            d_max=FLAGS.d_max,
                                                            x_lims=FLAGS.x_lims,
                                                            y_lims=FLAGS.y_lims)
    elif FLAGS.model == 'param':
        # Initialize the model
        tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
                                                            obscurations=tf_obscurations,
                                                            batch_size=FLAGS.batch_size,
                                                            output_Q=FLAGS.output_Q,
                                                            output_dim=FLAGS.output_dim,
                                                            n_zernikes=FLAGS.n_zernikes,
                                                            d_max=FLAGS.d_max,
                                                            x_lims=FLAGS.x_lims,
                                                            y_lims=FLAGS.y_lims)
    # # Model Training
    # Prepare the saving callback
    # Prepare to save the model as a callback
    filepath_chkp_callback = FLAGS.chkp_save_path + 'chkp_callback_' + run_id_name + '_cycle1'
    model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath_chkp_callback,
        monitor='mean_squared_error', verbose=1, save_best_only=True,
        save_weights_only=False, mode='min', save_freq='epoch',
        options=None)
    # Prepare the optimisers (one per model part, first-cycle learning rates)
    param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_param[0])
    non_param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_non_param[0])
    print('Starting cycle 1..')
    start_cycle1 = time.time()
    # The purely parametric model has no non-parametric part to alternate with,
    # so it uses the dedicated param-only training loop.
    if FLAGS.model == 'param':
        tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
            tf_semiparam_field,
            inputs=inputs,
            outputs=outputs,
            val_data=val_data,
            batch_size=FLAGS.batch_size,
            l_rate=FLAGS.l_rate_param[0],
            n_epochs=FLAGS.n_epochs_param[0],
            param_optim=param_optim,
            param_loss=None,
            param_metrics=None,
            param_callback=None,
            general_callback=[model_chkp_callback],
            verbose=2)
    else:
        tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
            tf_semiparam_field,
            inputs=inputs,
            outputs=outputs,
            val_data=val_data,
            batch_size=FLAGS.batch_size,
            l_rate_param=FLAGS.l_rate_param[0],
            l_rate_non_param=FLAGS.l_rate_non_param[0],
            n_epochs_param=FLAGS.n_epochs_param[0],
            n_epochs_non_param=FLAGS.n_epochs_non_param[0],
            param_optim=param_optim,
            non_param_optim=non_param_optim,
            param_loss=None, non_param_loss=None,
            param_metrics=None, non_param_metrics=None,
            param_callback=None, non_param_callback=None,
            general_callback=[model_chkp_callback],
            first_run=True,
            verbose=2)
    # Save weights
    tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
    end_cycle1 = time.time()
    print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
    # Save optimisation history in the saving dict
    saving_optim_hist['param_cycle1'] = hist_param.history
    if FLAGS.model != 'param':
        saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
    # Optional second cycle: same structure as cycle 1 but with the
    # second-entry learning rates / epoch counts and first_run=False.
    if FLAGS.total_cycles >= 2:
        # Prepare to save the model as a callback
        filepath_chkp_callback = FLAGS.chkp_save_path + 'chkp_callback_' + run_id_name + '_cycle2'
        model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath_chkp_callback,
            monitor='mean_squared_error', verbose=1, save_best_only=True,
            save_weights_only=False, mode='min', save_freq='epoch',
            options=None)
        # Prepare the optimisers
        param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_param[1])
        non_param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_non_param[1])
        print('Starting cycle 2..')
        start_cycle2 = time.time()
        # Compute the next cycle
        if FLAGS.model == 'param':
            tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
                tf_semiparam_field,
                inputs=inputs,
                outputs=outputs,
                val_data=val_data,
                batch_size=FLAGS.batch_size,
                l_rate=FLAGS.l_rate_param[1],
                n_epochs=FLAGS.n_epochs_param[1],
                param_optim=param_optim,
                param_loss=None,
                param_metrics=None,
                param_callback=None,
                general_callback=[model_chkp_callback],
                verbose=2)
        else:
            # Compute the next cycle
            tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
                tf_semiparam_field,
                inputs=inputs,
                outputs=outputs,
                val_data=val_data,
                batch_size=FLAGS.batch_size,
                l_rate_param=FLAGS.l_rate_param[1],
                l_rate_non_param=FLAGS.l_rate_non_param[1],
                n_epochs_param=FLAGS.n_epochs_param[1],
                n_epochs_non_param=FLAGS.n_epochs_non_param[1],
                param_optim=param_optim,
                non_param_optim=non_param_optim,
                param_loss=None, non_param_loss=None,
                param_metrics=None, non_param_metrics=None,
                param_callback=None, non_param_callback=None,
                general_callback=[model_chkp_callback],
                first_run=False,
                verbose=2)
        # Save the weights at the end of the second cycle
        tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
        end_cycle2 = time.time()
        print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
        # Save optimisation history in the saving dict
        saving_optim_hist['param_cycle2'] = hist_param_2.history
        if FLAGS.model != 'param':
            saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
    # Save optimisation history dictionary
    np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
    ## Print final time
    final_time = time.time()
    print('\nTotal elapsed time: %f'%(final_time - starting_time))
    ## Close log file and restore the original stdout
    print('\n Good bye..')
    sys.stdout = old_stdout
    log_file.close()
def evaluate_model():
""" Evaluate the trained model."""
# Start measuring elapsed time
starting_time = time.time()
# Define model run id
run_id_name = FLAGS.model + FLAGS.id_name
# Define paths
log_save_file = FLAGS.base_path + FLAGS.log_folder
# Define saved model to use
if FLAGS.saved_model_type == 'checkpoint':
weights_paths = FLAGS.chkp_save_path + 'chkp_callback_' + run_id_name + '_' + FLAGS.saved_cycle
elif FLAGS.saved_model_type == 'final':
model_save_file= FLAGS.base_path + FLAGS.model_folder
weights_paths = model_save_file + 'chkp_' + run_id_name + '_' + FLAGS.saved_cycle
## Save output prints to logfile
old_stdout = sys.stdout
log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
sys.stdout = log_file
print('Starting the log file.')
## Check GPU and tensorflow version
device_name = tf.test.gpu_device_name()
print('Found GPU at: {}'.format(device_name))
print('tf_version: ' + str(tf.__version__))
## Load datasets
train_dataset = np.load(FLAGS.dataset_folder + FLAGS.train_dataset_file, allow_pickle=True)[()]
# train_stars = train_dataset['stars']
# noisy_train_stars = train_dataset['noisy_stars']
# train_pos = train_dataset['positions']
train_SEDs = train_dataset['SEDs']
# train_zernike_coef = train_dataset['zernike_coef']
train_C_poly = train_dataset['C_poly']
train_parameters = train_dataset['parameters']
test_dataset = np.load(FLAGS.dataset_folder | |
<reponame>matthew-e-brown/Grade-11-Python-CCA<filename>pygame Keypress Game.py
##########################################
## <NAME>, Bayside Secondary ##
## School. ##
## ##
## V1.0 created Apr.10 to June.15, ##
## 2017. ##
## ##
## All assets made 100% by Me, ##
## including sounds. ##
## ##
## The game itself was inspired by ##
## Nerd³'s game, Systems Nominal. ##
## https://www.nerdcubed.co.uk/games/ ##
##########################################
import pygame, math, sys, time, random, os, pygame.gfxdraw
pygame.mixer.pre_init(22050, -16, 2, 1024)
pygame.init()
pygame.mixer.init()
## Define some colours (RGB tuples used throughout the game)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRAY = (230, 230, 230)
GREEN = (10, 160, 55)
RED = (215, 35, 35)
LIGHTGREEN = (75, 245, 125)
LIGHTRED = (255, 60, 60)
DARKRED = (153, 0, 0)
YELLOW = (255, 230, 35)
BLUE = (0, 0, 255)
SKYBLUE = (150, 215, 255)
WN_WIDTH = 1000 #these two lines make it easier to place things on the screen by relating it to the window's size.
WN_HEIGHT = 800 #they are also useful because they let you quickly change the window size and keep most things in the same place.
size = (WN_WIDTH, WN_HEIGHT)
screen = pygame.display.set_mode((size))
pygame.display.set_caption("Keypress Game")
clock = pygame.time.Clock()
## Define some Fonts
# NOTE(review): absolute Windows font paths -- this raises FileNotFoundError
# on any machine without Consolas installed at C:/Windows/Fonts.
monitorFont = pygame.font.Font("C:/Windows/Fonts/consola.ttf", 17)
monitorFontBold = pygame.font.Font("C:/Windows/Fonts/consolab.ttf", 17)
monitorFontBig = pygame.font.Font("C:/Windows/Fonts/consola.ttf", 62)
monitorFontBigBold = pygame.font.Font("C:/Windows/Fonts/consolab.ttf", 62)
## Define some Texts
def timerText(x, y): ## Blits the two-part countdown readout with its left edge at (x, y).
    """Draw the ">>> You have N.NN seconds left!" line at (x, y).

    The remaining time is rounded to two decimals; once it hits zero (or the
    player has used up all their strikes) a fixed "0.00" message is shown.
    """
    # Round (timer - elapsed) to 2 decimal places; +0.5 before int() rounds
    # half-up for the positive values this branch cares about.
    remaining = int((timer - elapsed) * 100 + 0.5) / 100
    if remaining > 0 and strikes > mistakes:
        head = monitorFont.render(">>> You have " + str(remaining), True, WHITE)
        tail = monitorFont.render(" seconds left!", True, WHITE)
        screen.blit(head, [x, y])
        # The tail is blitted at a fixed offset so the text does not jitter
        # as the number of digits changes.
        screen.blit(tail, [x + 162.5, y])
    else:
        # Time is up or the player ran out of strikes.
        out_of_time = monitorFont.render(">>> You have 0.00 seconds left!", True, WHITE)
        screen.blit(out_of_time, [x, y])
def highscoresText(x, y): ## Running this command will blit a block of the highscores onto the screen as if it was a sprite.
    """Blit the ten-row highscore table with its top-left corner at (x, y).

    scoresSTRING() returns a list of 2-tuples; element 0 is used here as the
    score string (a ":b" suffix marks the freshly achieved score, which is
    rendered bold and yellow) and element 1 as the player's name.
    NOTE(review): the original comment described the tuples as (name, score),
    which contradicts this indexing -- confirm against scoresSTRING().
    """
    # Fetch the table once (the original called scoresSTRING() twice and split
    # the same data into two parallel lists by hand, ten copies per list).
    entries = scoresSTRING()
    scores = [entry[0] for entry in entries]
    names = [entry[1] for entry in entries]
    screen.blit(monitorFont.render(">>> Highscores are:", True, WHITE), [x, y])
    # Exactly ten rows, 20px apart, starting 40px below the header.  Like the
    # original, this raises IndexError if scoresSTRING() returns < 10 entries.
    for rank in range(10):
        label = ">>> " + str(rank + 1) + ". " + names[rank] + " -- "
        if "b" in scores[rank]:
            # Newly achieved score: strip the ":b" marker and highlight it.
            row = monitorFontBold.render(label + scores[rank].split(":")[0], True, YELLOW)
        else:
            row = monitorFont.render(label + scores[rank], True, WHITE)
        screen.blit(row, [x, y + 40 + rank * 20])
# Pre-rendered static labels, blitted each frame without re-rendering.
timeLeft = monitorFontBold.render("Time:", True, WHITE)
mistakesLeft = monitorFontBold.render("Mistakes Remaining:", True, WHITE)
## Define some Images
imgBKGD = pygame.image.load("metal texture.png").convert()
## Define some variables
## most of these are declared here so the game has a starting point.
timer = 30      # seconds on the countdown at the start of a game
strikes = 6     # mistakes the player is allowed before losing
startingTimer = math.pi #because why not?
turn = 0
mistakes = 0
score = 0
timeTaken = 0
shamepick = 0
# NOTE(review): `global` at module level is a no-op in Python -- only the
# assignments below these statements have any effect.
global CORRECTKEY
CORRECTKEY = 0
global keyPressed
keyPressed = ""
resetCheck = False
# One flag per tracked key, so holding a key does not re-trigger it.
y_pressed = False
e_pressed = False
s_pressed = False
global highscores
global highscoresINT
meme_hack = False ## a hidden function they can activate in the game :P
## Define some game functions
def timerGraphic(cx, cy, radius):
    """Draw the circular countdown dial centred at (cx, cy).

    A light-red disc shows the time remaining; a dark-red pie slice grows
    from 12 o'clock as `elapsed` approaches `timer`.  pygame has no
    filled_pie(), so the slice is built as a filled polygon of points along
    the arc, with a 0.7px-smaller twin polygon and anti-aliased outlines
    layered on top to hide the rough, un-antialiased edges.
    """
    # Fraction of the allotted time already used, as a sweep angle in degrees.
    sweep = int(elapsed / timer * 360)
    # Both polygons start (and later end) at the centre so they close into a pie.
    outline_pts = [(cx, cy)]
    fill_pts = [(cx, cy)]
    for step in range(sweep):
        # The -90 offset moves the 0-degree point from 3 o'clock to 12 o'clock.
        theta = (step - 90) * math.pi / 180
        outline_pts.append((cx + int(radius * math.cos(theta)),
                            cy + int(radius * math.sin(theta))))
        # Slightly smaller twin used as the solid fill beneath the outline.
        fill_pts.append((cx + int((radius - 0.7) * math.cos(theta)),
                         cy + int((radius - 0.7) * math.sin(theta))))
    outline_pts.append((cx, cy))
    fill_pts.append((cx, cy))
    # Backing discs: a dark ring (radius+3) beneath the light "time left" disc.
    pygame.gfxdraw.aacircle(screen, cx, cy, radius+3, DARKRED)
    pygame.gfxdraw.filled_circle(screen, cx, cy, radius+3, DARKRED)
    pygame.gfxdraw.filled_circle(screen, cx, cy, radius, LIGHTRED)
    # A polygon needs at least 3 points, i.e. a sweep of at least 1 degree.
    if len(outline_pts) > 2:
        pygame.gfxdraw.filled_polygon(screen, fill_pts, DARKRED)
        pygame.gfxdraw.aapolygon(screen, outline_pts, DARKRED)
    # Anti-aliased rims that mask the seam between the disc and the slice.
    pygame.gfxdraw.aacircle(screen, cx, cy, radius, DARKRED)
    pygame.gfxdraw.aacircle(screen, cx, cy, (radius-1), DARKRED)
    # Static "Time:" label to the left of the dial.
    screen.blit(timeLeft, [cx-radius*1.75, cy-(radius/9)])
def lifeGraphic(cx, cy, h, w):
| |
`Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/api/v1/experiments/{id}/checkpoints', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1GetExperimentCheckpointsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def determined_get_experiment_labels(self, **kwargs):  # noqa: E501
    """Get a list of unique experiment labels (sorted by popularity). # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.determined_get_experiment_labels(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1GetExperimentLabelsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the bare payload,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; with
    # async_req=True the delegate hands back the request thread unchanged.
    return self.determined_get_experiment_labels_with_http_info(**kwargs)  # noqa: E501
def determined_get_experiment_labels_with_http_info(self, **kwargs):  # noqa: E501
    """Get a list of unique experiment labels (sorted by popularity). # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.determined_get_experiment_labels_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1GetExperimentLabelsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no endpoint-specific arguments, only the generic
    # request-control keywords; anything else is a caller error.
    allowed = ('async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    params = {}
    for key, val in kwargs.items():
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method determined_get_experiment_labels" % key
            )
        params[key] = val
    # Negotiate JSON in both directions.
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/api/v1/experiment/labels', 'GET',
        {},                      # path params (none)
        [],                      # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1GetExperimentLabelsResponse',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def determined_get_experiment_trials(self, experiment_id, **kwargs):  # noqa: E501
    """Get the list of trials for an experiment. # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.determined_get_experiment_trials(experiment_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int experiment_id: Limit trials to those that are owned by the specified experiments. (required)
    :param str sort_by: Sort trials by the given field (SORT_BY_ID, SORT_BY_START_TIME, SORT_BY_END_TIME, SORT_BY_STATE, SORT_BY_BEST_VALIDATION_METRIC, SORT_BY_LATEST_VALIDATION_METRIC, SORT_BY_BATCHES_PROCESSED, SORT_BY_DURATION; SORT_BY_UNSPECIFIED leaves the list unsorted).
    :param str order_by: ORDER_BY_ASC or ORDER_BY_DESC (ORDER_BY_UNSPECIFIED for no ordering).
    :param int offset: Skip this many trials before returning results; negative values skip from the end.
    :param int limit: Limit the number of trials. A value of 0 denotes no limit.
    :param list[str] states: Limit trials to those matching the provided STATE_* values.
    :return: V1GetExperimentTrialsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want the bare payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate identically; with async_req=True the
    # delegate returns the request thread, which is passed through as-is.
    return self.determined_get_experiment_trials_with_http_info(experiment_id, **kwargs)  # noqa: E501
def determined_get_experiment_trials_with_http_info(self, experiment_id, **kwargs): # noqa: E501
"""Get the list of trials for an experiment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.determined_get_experiment_trials_with_http_info(experiment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int experiment_id: Limit trials to those that are owned by the specified experiments. (required)
:param str sort_by: Sort trials by the given field. - SORT_BY_UNSPECIFIED: Returns trials in an unsorted list. - SORT_BY_ID: Returns trials sorted by id. - SORT_BY_START_TIME: Return trials sorted by start time. - SORT_BY_END_TIME: Return trials sorted by end time. Trials without end times are returned after trials that are. - SORT_BY_STATE: Return trials sorted by state. - SORT_BY_BEST_VALIDATION_METRIC: Return the trials sorted by the best metric so far, where the metric is specified by `searcher.metric` in the experiment configuration. - SORT_BY_LATEST_VALIDATION_METRIC: Return the trials sorted by the latest metric so far, where the metric is specified by `searcher.metric` in the experiment configuration. - SORT_BY_BATCHES_PROCESSED: Return the trials sorted by the number of batches completed. - SORT_BY_DURATION: Return the trials sorted by the total duration.
:param str order_by: Order trials in either ascending or descending order. - ORDER_BY_UNSPECIFIED: Returns records in no specific order. - ORDER_BY_ASC: Returns records in ascending order. - ORDER_BY_DESC: Returns records in descending order.
:param int offset: Skip the number of trials before returning results. Negative values denote number of trials to skip from the end before returning results.
:param int limit: Limit the number of trials. A value of 0 denotes no limit.
:param list[str] states: Limit trials to those that match the provided state. - STATE_UNSPECIFIED: The state of the experiment is unknown. - STATE_ACTIVE: The experiment is in an active state. - STATE_PAUSED: The experiment is in a paused state - STATE_STOPPING_COMPLETED: The experiment is completed and is shutting down. - STATE_STOPPING_CANCELED: The experiment is canceled and is shutting down. - STATE_STOPPING_ERROR: The experiment is errored and is shutting down. - STATE_COMPLETED: The experiment is completed and is shut down. - STATE_CANCELED: The experiment is canceled and is shut down. - STATE_ERROR: The experiment is errored and is shut down. - STATE_DELETED: The experiment has been deleted.
:return: V1GetExperimentTrialsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['experiment_id', 'sort_by', 'order_by', 'offset', 'limit', 'states'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method determined_get_experiment_trials" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'experiment_id' is set
if ('experiment_id' not in params or
params['experiment_id'] is None):
raise ValueError("Missing the required parameter `experiment_id` when calling `determined_get_experiment_trials`") # noqa: E501
collection_formats = {}
path_params = {}
if 'experiment_id' in params:
path_params['experimentId'] = params['experiment_id'] # noqa: E501
query_params = []
if 'sort_by' in params:
query_params.append(('sortBy', params['sort_by'])) # noqa: E501
if 'order_by' in params:
query_params.append(('orderBy', params['order_by'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'states' in params:
query_params.append(('states', params['states'])) # noqa: E501
collection_formats['states'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
| |
to the nearest integer multiple of specified significance,
with negative numbers rounding toward or away from 0 depending on the mode.
Learn more:
https//support.google.comhttps://support.google.com/docs/answer/9061444
"""
return Function("FLOOR_MATH", args)
def FLOOR_PRECISE(*args) -> Function:
    """
    The FLOOR.PRECISE function rounds a number down to the nearest integer or
    multiple of specified significance.
    Learn more:
    https://support.google.com/docs/answer/9116270.
    """
    return Function("FLOOR_PRECISE", args)
def GAMMALN(*args) -> Function:
    """
    Returns the logarithm of a specified Gamma function, base e (Euler's
    number).
    Learn more: https://support.google.com/docs/answer/3093416
    """
    return Function("GAMMALN", args)
def GAMMALN_PRECISE(*args) -> Function:
    "See GAMMALN"
    return Function("GAMMALN_PRECISE", args)
def GCD(*args) -> Function:
    """
    Returns the greatest common divisor of one or more integers.
    Learn more: https://support.google.com/docs/answer/3093489
    """
    return Function("GCD", args)
def IMLN(*args) -> Function:
    """
    Returns the logarithm of a complex number, base e (Euler's number).
    Learn more: https://support.google.com/docs/answer/9000651
    """
    return Function("IMLN", args)
def IMPOWER(*args) -> Function:
    """
    Returns a complex number raised to a power.
    Learn more: https://support.google.com/docs/answer/9003065
    """
    return Function("IMPOWER", args)
def IMSQRT(*args) -> Function:
    """
    Computes the square root of a complex number.
    Learn more: https://support.google.com/docs/answer/9003168
    """
    return Function("IMSQRT", args)
def INT(*args) -> Function:
    """
    Rounds a number down to the nearest integer that is less than or equal to it.
    Learn more: https://support.google.com/docs/answer/3093490
    """
    return Function("INT", args)
def ISEVEN(*args) -> Function:
    """
    Checks whether the provided value is even.
    Learn more: https://support.google.com/docs/answer/3093419
    """
    return Function("ISEVEN", args)
def ISO_CEILING(*args) -> Function:
    "See CEILING.PRECISE"
    return Function("ISO_CEILING", args)
def ISODD(*args) -> Function:
    """
    Checks whether the provided value is odd.
    Learn more: https://support.google.com/docs/answer/3093491
    """
    return Function("ISODD", args)
def LCM(*args) -> Function:
    """
    Returns the least common multiple of one or more integers.
    Learn more: https://support.google.com/docs/answer/3093421
    """
    return Function("LCM", args)
def LN(*args) -> Function:
    """
    Returns the logarithm of a number, base e (Euler's number).
    Learn more: https://support.google.com/docs/answer/3093422
    """
    return Function("LN", args)
def LOG(*args) -> Function:
    """
    Returns the logarithm of a number given a base.
    Learn more: https://support.google.com/docs/answer/3093495
    """
    return Function("LOG", args)
def LOG10(*args) -> Function:
    """
    Returns the logarithm of a number, base 10.
    Learn more: https://support.google.com/docs/answer/3093423
    """
    return Function("LOG10", args)
def MOD(*args) -> Function:
    """
    Returns the result of the modulo operator, the remainder after a division
    operation.
    Learn more: https://support.google.com/docs/answer/3093497
    """
    return Function("MOD", args)
def MROUND(*args) -> Function:
    """
    Rounds one number to the nearest integer multiple of another.
    Learn more: https://support.google.com/docs/answer/3093426
    """
    return Function("MROUND", args)
def MULTINOMIAL(*args) -> Function:
    """
    Returns the factorial of the sum of values divided by the product of the values'
    factorials.
    Learn more: https://support.google.com/docs/answer/3093429
    """
    return Function("MULTINOMIAL", args)
def MUNIT(*args) -> Function:
    """
    Returns a unit matrix of size dimension x dimension.
    Learn more: https://support.google.com/docs/answer/9368156.
    """
    return Function("MUNIT", args)
def ODD(*args) -> Function:
    """
    Rounds a number up to the nearest odd integer.
    Learn more: https://support.google.com/docs/answer/3093499
    """
    return Function("ODD", args)
def PI(*args) -> Function:
    """
    Returns the value of Pi to 14 decimal places.
    Learn more: https://support.google.com/docs/answer/3093432
    """
    return Function("PI", args)
def POWER(*args) -> Function:
    """
    Returns a number raised to a power.
    Learn more: https://support.google.com/docs/answer/3093433
    """
    return Function("POWER", args)
def PRODUCT(*args) -> Function:
    """
    Returns the result of multiplying a series of numbers together.
    Learn more: https://support.google.com/docs/answer/3093502
    """
    return Function("PRODUCT", args)
def QUOTIENT(*args) -> Function:
    """
    Returns one number divided by another.
    Learn more: https://support.google.com/docs/answer/3093436
    """
    return Function("QUOTIENT", args)
def RADIANS(*args) -> Function:
    """
    Converts an angle value in degrees to radians.
    Learn more: https://support.google.com/docs/answer/3093437
    """
    return Function("RADIANS", args)
def RAND(*args) -> Function:
    """
    Returns a random number between 0 inclusive and 1 exclusive.
    Learn more: https://support.google.com/docs/answer/3093438
    """
    return Function("RAND", args)
def RANDARRAY(*args) -> Function:
    """
    Generates an array of random numbers between 0 and 1.
    Learn more: https://support.google.com/docs/answer/9211904.
    """
    return Function("RANDARRAY", args)
def RANDBETWEEN(*args) -> Function:
    """
    Returns a uniformly random integer between two values, inclusive.
    Learn more: https://support.google.com/docs/answer/3093507
    """
    return Function("RANDBETWEEN", args)
def ROUND(*args) -> Function:
    """
    Rounds a number to a certain number of decimal places according to standard
    rules.
    Learn more: https://support.google.com/docs/answer/3093440
    """
    return Function("ROUND", args)
def ROUNDDOWN(*args) -> Function:
    """
    Rounds a number to a certain number of decimal places, always rounding down to
    the next valid increment.
    Learn more: https://support.google.com/docs/answer/3093442
    """
    return Function("ROUNDDOWN", args)
def ROUNDUP(*args) -> Function:
    """
    Rounds a number to a certain number of decimal places, always rounding up to the
    next valid increment.
    Learn more: https://support.google.com/docs/answer/3093443
    """
    return Function("ROUNDUP", args)
def SEC(*args) -> Function:
    """
    The SEC function returns the secant of an angle, measured in radians.
    Learn more: https://support.google.com/docs/answer/9116395.
    """
    return Function("SEC", args)
def SECH(*args) -> Function:
    """
    The SECH function returns the hyperbolic secant of an angle.
    Learn more: https://support.google.com/docs/answer/9116560
    """
    return Function("SECH", args)
def SEQUENCE(*args) -> Function:
    """
    Returns an array of sequential numbers, such as 1, 2, 3, 4.
    Learn more: https://support.google.com/docs/answer/9368244.
    """
    return Function("SEQUENCE", args)
def SERIESSUM(*args) -> Function:
    """
    Given parameters x, n, m, and a, returns the power series sum a1xn + a2x(n+m) +
    ... + aix(n+(i-1)m), where i is the number of entries in range `a`.
    Learn more: https://support.google.com/docs/answer/3093444
    """
    return Function("SERIESSUM", args)
def SIGN(*args) -> Function:
    """
    Given an input number, returns `-1` if it is negative, `1` if positive, and `0`
    if it is zero.
    Learn more: https://support.google.com/docs/answer/3093513
    """
    return Function("SIGN", args)
def SIN(*args) -> Function:
    """
    Returns the sine of an angle provided in radians.
    Learn more: https://support.google.com/docs/answer/3093447
    """
    return Function("SIN", args)
def SINH(*args) -> Function:
    """
    Returns the hyperbolic sine of any real number.
    Learn more: https://support.google.com/docs/answer/3093517
    """
    return Function("SINH", args)
def SQRT(*args) -> Function:
    """
    Returns the positive square root of a positive number.
    Learn more: https://support.google.com/docs/answer/3093577
    """
    return Function("SQRT", args)
def SQRTPI(*args) -> Function:
    """
    Returns the positive square root of the product of Pi and the given positive
    number.
    Learn more: https://support.google.com/docs/answer/3093579
    """
    return Function("SQRTPI", args)
def SUBTOTAL(*args) -> Function:
    """
    Returns a subtotal for a vertical range of cells using a specified aggregation
    function.
    Learn more: https://support.google.com/docs/answer/3093649
    """
    return Function("SUBTOTAL", args)
def SUM(*args) -> Function:
    """
    Returns the sum of a series of numbers and/or cells.
    Learn more: https://support.google.com/docs/answer/3093669
    """
    return Function("SUM", args)
def SUMIF(*args) -> Function:
    """
    Returns a conditional sum across a range.
    Learn more: https://support.google.com/docs/answer/3093583
    """
    return Function("SUMIF", args)
def SUMIFS(*args) -> Function:
    """
    Returns the sum of a range depending on multiple criteria.
    Learn more: https://support.google.com/docs/answer/3238496
    """
    return Function("SUMIFS", args)
def SUMSQ(*args) -> Function:
    """
    Returns the sum of the squares of a series of numbers and/or cells.
    Learn more: https://support.google.com/docs/answer/3093714
    """
    return Function("SUMSQ", args)
def TAN(*args) -> Function:
    """
    Returns the tangent of an angle provided in radians.
    Learn more: https://support.google.com/docs/answer/3093586
    """
    return Function("TAN", args)
def TANH(*args) -> Function:
    """
    Returns the hyperbolic tangent of any real number.
    Learn more: https://support.google.com/docs/answer/3093755
    """
    return Function("TANH", args)
def TRUNC(*args) -> Function:
    """
    Truncates a number to a certain number of significant digits by omitting less
    significant digits.
    Learn more: https://support.google.com/docs/answer/3093588
    """
    return Function("TRUNC", args)
def ADD(*args) -> Function:
    """
    Returns the sum of two numbers. Equivalent to the `+` operator.
    Learn more: https://support.google.com/docs/answer/3093590
    """
    return Function("ADD", args)
def CONCAT(*args) -> Function:
    """
    Returns the concatenation of two values. Equivalent to the `&` operator.
    Learn more: https://support.google.com/docs/answer/3093592
    """
    return Function("CONCAT", args)
def DIVIDE(*args) -> Function:
    """
    Returns one number divided by another. Equivalent to the `/` operator.
    Learn more: https://support.google.com/docs/answer/3093973
    """
    return Function("DIVIDE", args)
def EQ(*args) -> Function:
    """
    Returns `TRUE` if two specified values are equal and `FALSE` otherwise.
    Equivalent to the `=` operator.
    Learn more: https://support.google.com/docs/answer/3093593
    """
    return Function("EQ", args)
def GT(*args) -> Function:
    """
    Returns `TRUE` if the first argument is strictly greater than the second, and
    `FALSE` otherwise. Equivalent to the `>` operator.
    Learn more: https://support.google.com/docs/answer/3098240
    """
    return Function("GT", args)
def GTE(*args) -> Function:
    """
    Returns `TRUE` if the first argument is greater than or equal to the second, and
    `FALSE` otherwise. Equivalent to the `>=` operator.
    Learn more: https://support.google.com/docs/answer/3093975
    """
    return Function("GTE", args)
def LT(*args) -> Function:
    """
    Returns `TRUE` if the first argument is strictly less than the second, and
    `FALSE` otherwise. Equivalent to the `<` operator.
    Learn more: https://support.google.com/docs/answer/3093596
    """
    return Function("LT", args)
def LTE(*args) -> Function:
"""
Returns `TRUE` if the first argument is less than or equal to the second, and
`FALSE` otherwise. | |
""" mc5005.py
A python module to control Faulhaber motors via the MC5005 Motion Controller and serial interface.
- Setup your motor with Motion Manager first.
- Install pyserial from PyPI
Now you can use this module to control the motor via serial interface (needs setup first of course).
python 3 support only
Copyright (2020) <NAME>
Licence: MIT
Adapted by <NAME> on 4/7/2021. Now more than 1 motor can be controlled through
the same PORT (@Benno Meier work) and can be operated in semi-synchronous mode.
"""
import serial
import struct
import time
OPERATION_MODE = 0x6060 # operation mode
OPERATION_MODE_DISP = 0x6061 # operation mode display
def dump(x):
    """Render a bytes-like object as a constructor-style string.

    Example: dump(b'SE') -> "bytes('\\x53\\x45')".  Used for debug
    printing of raw protocol frames.
    """
    hex_parts = [f'\\x{byte:02x}' for byte in x]
    return ''.join([type(x).__name__, "('"] + hex_parts + ["')"])
class Controller(object):
    """This class represents a Faulhaber MC5005 Motion Controller.

    It handles all the framing (SOF / length / CRC / EOF) of the serial
    protocol and needs to be given to Motor objects on initialization;
    every register access goes through it.
    """
    def __init__(self, port, baudrate = 115200, timeout = 2):
        """Initialize the interface to the motion controller.

        port     : serial port name, i.e. "COM4" or "/dev/ttyUSB0"
        baudrate : optional keyword argument, preset to 115200
        timeout  : optional read timeout in seconds, preset to 2
        """
        self.ser = serial.Serial(port, baudrate, timeout = timeout)
        # Protocol framing bytes and command codes.
        self.S = b'\x53'    # start of frame ('S')
        self.E = b'\x45'    # end of frame ('E')
        self.GET = b'\x01'  # "read register" command code
        self.SET = b'\x02'  # "write register" command code
    def close(self):
        """Close the underlying serial port."""
        self.ser.close()
    def CRC(self, msg):
        """Calculate the Cyclic Redundancy Check for message msg.

        msg is the entire command without SOF and EOF.  Polynomial 0xd5,
        initial value 0xff; returns the CRC packed as a single byte.
        """
        poly = 0xd5
        crc = 0xff
        for byte in msg:
            crc = crc ^ byte
            for _ in range(8):
                if crc & 0x01:
                    crc = ((crc >> 1) ^ poly)
                else:
                    crc >>= 1
        return struct.pack("B", crc)
    def write(self, command):
        """Frame `command`, send it, and return the controller's reply.

        The length byte counts the command itself + 1 for the length byte
        + 1 for the CRC byte.
        """
        command = struct.pack("B", len(command) + 2) + command
        command = self.S + command + self.CRC(command) + self.E
        # Drop any stale buffered bytes so read() starts on our reply.
        self.ser.flushOutput()
        self.ser.flushInput()
        self.ser.write(command)
        return self.read()
    def read(self):
        """Read one reply frame and return its payload bytes.

        First reads the start byte and the length, then the rest of the
        transmission.  Raises IOError on a short/absent reply and asserts
        that the reply CRC is valid.
        """
        ans = self.ser.read(2)
        if len(ans) < 2:
            # Bug fix: the original bare `except` only printed the partial
            # answer and then crashed with a NameError on `length`.
            raise IOError("Error: Ans: %r (incomplete response from controller)" % (ans,))
        length = ans[1]
        ansAll = ans + self.ser.read(length)
        # Check the CRC (computed over everything between SOF and the CRC byte).
        assert self.CRC(ansAll[1:-2]) == struct.pack("B", ansAll[-2])
        # ansAll includes self.S, so data starts at position 7
        return ansAll[7:-2]
    def readRegister(self, address, node = b'\x01', subindex = 0, debug = False):
        """Read a register and return its raw little-endian payload.

        address  : address of the register to be read
        node     : optional node id (defaults to node 1)
        subindex : optional subindex, preset to 0
        debug    : when True, dump the raw command to stdout
        """
        command = node + self.GET + int.to_bytes(address, 2, 'little') + int.to_bytes(subindex, 1, 'little')
        if debug:
            print(dump(command))
        return self.write(command)
    def setRegister(self, address, value, length, node = b'\x01', subindex = 0):
        """Write `value` into a register.

        address : two byte address of the register, i.e. 0x6040
        value   : value to write (signed, little-endian)
        length  : length of the register, in bytes
        node / subindex : optional addressing, as in readRegister
        """
        command = ( node + self.SET
                   + int.to_bytes(address, 2, 'little')
                   + int.to_bytes(subindex, 1, 'little')
                   + int.to_bytes(value, length, 'little',signed=True))
        self.write(command)
    def getCastedRegister(self, address, subindex = 0):
        """Read a register and return its content as a hex string.

        NOTE(review): this reads through the module-level global `C`, not
        `self`.  Motor instances (which never open a port of their own)
        rely on that accident when they call the inherited Enable2 /
        Disable2 / printStatus, so it is documented rather than changed.
        """
        return hex(int.from_bytes(C.readRegister(address, subindex = subindex), byteorder='little'))
    def printStatus(self):
        """Print the CiA 402 statusword register (0x6041)."""
        print("Status: ", self.getCastedRegister(0x6041))
    #--------------------------------------------
    # SetDigOut(PinNr)
    # Will set the digital output 1 or 2 to high. Shutdown hall sensors.
    #--------------------------------------------
    def SetDigOut(self, PinNr):
        if PinNr==1:
            self.setRegister(0x2311, 0xfd, 2, node = b'\x01',subindex=4)
        if PinNr==2:
            self.setRegister(0x2311, 0xf7, 2, node = b'\x01',subindex=4)
    #--------------------------------------------
    # ClearDigOut(PinNr)
    # Will clear the digital output 1 or 2 to low. Switch on hall sensors.
    #--------------------------------------------
    def ClearDigOut(self, PinNr):
        if PinNr==1:
            self.setRegister(0x2311, 0xfc, 2, node = b'\x01',subindex=4)
        if PinNr==2:
            self.setRegister(0x2311, 0xf3, 2, node = b'\x01',subindex=4)
class Motor(Controller):
    """This class is an interface to a Faulhaber Motor. You need to give it a
    controller object upon initialization, and optionally the node to which
    the motor connects.

    NOTE(review): Motor subclasses Controller but never calls
    Controller.__init__, so a Motor owns no serial port; all of its own
    methods route traffic through self.controller.  The inherited Enable2 /
    Disable2 use getCastedRegister, which in this file reads via the
    module-level global `C` -- confirm a global Controller named C exists
    before using them.
    """
    def __init__(self, controller, node = b'\x01'):
        # controller: a Controller instance that owns the serial port.
        # node: node id of this motor on the shared serial bus.
        self.controller = controller
        self.node = node
    def getPosition(self):
        """Return the actual position (register 0x6064) as a signed int."""
        answer = self.controller.readRegister(0x6064, node = self.node, debug = False)
        position = int.from_bytes(answer, byteorder='little', signed = True)
        return position
    def setControlWord(self, word):
        # 0x6040 is the CiA 402 controlword register (2 bytes).
        self.controller.setRegister(0x6040, word, 2, node = self.node)
    def setMaxSpeed(self, value=6000):
        # 0x6080 holds the speed limit; default value 6000.
        self.controller.setRegister(0x6080, value, 2, node = self.node)
    def setTarget(self, value):
        # 0x607a is the target position register (4 bytes, signed).
        self.controller.setRegister(0x607a, value, 4, node = self.node)
    def getTargetPositionSource(self):
        # Raw read of register 0x2331, subindex 4.
        return self.controller.readRegister(0x2331, subindex = 4, node = self.node)
    def setPositionMode(self):
        """Write 1 to object 0x6060 via a hand-built frame (position mode)."""
        #self.setRegister(0x6060, 1, 1)
        command = self.node + b'\x02' + b'\x60\x60\x00' + b'\x01'
        self.controller.write(command)
    def shutDown(self):
        # CiA 402 "Shutdown" command.
        self.setControlWord(0x06)
    def switchOn(self):
        # CiA 402 "Switch On" command.
        self.setControlWord(0x07)
    def enable(self):
        # CiA 402 "Enable Operation" command.
        self.setControlWord(0x0f)
    def DisableVoltage(self):
        # CiA 402 "Disable Voltage" command.
        self.setControlWord(0x00)
    def positionAbsolute(self, value):
        """set absolute position. Make sure the device is in position mode prior to using
        this function."""
        self.setTarget(value)
        self.setControlWord(0x0f)
        self.setControlWord(0x3f)
    def positionRelative(self, value):
        """set relative position. Make sure the device is in position mode prior to using this function."""
        self.setTarget(value)
        self.setControlWord(0x0f)
        self.setControlWord(0x7f)
    """
    --------------------------------------------
    Enable()
    will start the CiA 402 state machine or re-enable
    the control. Returns only after the OperationEnabled
    state is reached. Adapted from FA.
    --------------------------------------------
    """
    def Enable2(self):
        """Drive the CiA 402 state machine to Operation Enabled (blocking)."""
        EnState = 0 #reset the local step counter
        CiAStatusword = int(self.getCastedRegister(0x6041),base=16) #initial check of the status word
        CiAStatusMask = 0x6f
        CiAStatus = CiAStatusword & CiAStatusMask
        CiAStatus_OperationEnabled = 0x27
        CiAStatus_SwitchOnDisabled = 0x40
        CiAStatus_QuickStop = 0x07
        #check for being in stopped mode
        if CiAStatus == CiAStatus_QuickStop:
            self.setControlWord(0x0f) #Enable Operation
            EnState = 1
        elif CiAStatus == CiAStatus_OperationEnabled: #drive is already enabled
            EnState = 2
        elif CiAStatus != CiAStatus_SwitchOnDisabled: # otherwise it's safe to disable first
            # we need to send a shutdown first
            self.setControlWord(0x00) #Controlword = CiACmdDisableVoltage
        while EnState != 2:
            CiAStatusword = int(self.getCastedRegister(0x6041),base=16)
            CiAStatusMask = 0x6f
            CiAStatus = (CiAStatusword & CiAStatusMask) #cyclically check the status word
            if EnState == 0:
                if CiAStatus == 0x40:
                    #send the enable signature
                    self.setControlWord(0x06) #CiACmdShutdown
                    self.setControlWord(0x0f) #CiACmdEnableOperation
                    #now wait for being enabled
                    EnState = 1
            elif EnState == 1:
                #wait for enabled
                if CiAStatus == CiAStatus_OperationEnabled:
                    EnState = 2
    """
    --------------------------------------------
    Disable()
    Will stop the drive and shut the
    CiA 402 state machine down TO the initial state
    returns only after the initial state (Switch On Disabled)
    is reached. Adapted from FA.
    ---------------------------------------------
    """
    def Disable2(self):
        """Drive the CiA 402 state machine back to Switch On Disabled (blocking)."""
        DiState = 0 #reset the local step counter
        CiAStatusword = int(self.getCastedRegister(0x6041),base=16) #initial check of the status word
        CiAStatusMask = 0x6f
        CiAStatus = CiAStatusword & CiAStatusMask
        CiAStatus_OperationEnabled = 0x27
        if CiAStatus == CiAStatus_OperationEnabled:
            #send a shutdown command first to stop the motor
            self.setControlWord(0x07) #CiACmdDisable
            DiState = 1
        else:
            #otherwise the disable voltage is the next command
            #out of quick-stop or switched on.
            DiState = 2
        while DiState != 4:
            CiAStatusword = int(self.getCastedRegister(0x6041),base=16)
            CiAStatusMask = 0x6f
            CiAStatus = (CiAStatusword & CiAStatusMask) #cyclically check the status
            if DiState == 1:
                if CiAStatus == 0x23:
                    #only now it's safe to send the disable voltage command
                    DiState = 2
            elif DiState == 2:
                #wait for enabled
                self.setControlWord(0x00) #CiACmdDisableVoltage
                DiState = 3
            elif DiState == 3:
                #wait for final state
                if CiAStatus == 0x40:
                    DiState = 4
# if __name__ == "__main__":
# C = Controller("COM4")
# # C.ClearDigOut(1) #Power on Hall sensors
# print("Device Type: ", C.getCastedRegister(0x1000))
# print("Serial Number: ", C.getCastedRegister(0x1018, subindex = 4))
# print("Status: ", C.getCastedRegister(0x6041))
# print("Modes of Operation: ", C.getCastedRegister(0x6060))
# print("Modes of Operation Display: ", C.getCastedRegister(0x6061))
# print("Producer Heartbeat Time: ", C.getCastedRegister(0x1017))
# print("Actual Program Position: ", C.getCastedRegister(0x3001, subindex = 3))
# print("Actual Program State: ", C.getCastedRegister(0x3001, subindex = 4))
# print("Error State: ", C.getCastedRegister(0x3001, subindex = 8))
# print("Error code: ", C.getCastedRegister(0x3001, subindex = 9))
# print("Motor Type: ", C.getCastedRegister(0x2329, subindex = 0x0b))
# print("Encoder Increments: ", C.getCastedRegister(0x608f, subindex = 1))
# print("Serial Number: ", C.getCastedRegister(0x1018, subindex = 4))
# print("Feed Constant: ", C.getCastedRegister(0x6092, subindex = 1))
# C.printStatus()
# print("\n\nPreparing Device.\n" + "="*20)
# X1 = Motor(C, node = b'\x01') #vertical axis
# Y1 = Motor(C, node = b'\x02') #horizontal axis
# X1.setPositionMode() #set PP mode
# Y1.setPositionMode() #set PP mode
# C.printStatus()
# print("Restarting Devices.")
# enable_1(X1)
# enable_1(Y1)
# print("Restart Complete.")
# C.printStatus()
# print("")
def enable_1(NodeNr):
    """Run the simple CiA 402 enable sequence on one axis.

    Sends shutdown, switch-on and enable in order, then reports success.
    """
    for step in (NodeNr.shutDown, NodeNr.switchOn, NodeNr.enable):
        step()
    print('Enable axis '+str(NodeNr)+' successful')
C = Controller("/dev/ttyUSB2")
# C.ClearDigOut(1) #Power on Hall sensors
print("Device Type: ", C.getCastedRegister(0x1000))
print("Serial Number: ", C.getCastedRegister(0x1018, subindex = 4))
print("Status: ", C.getCastedRegister(0x6041))
print("Modes of Operation: ", C.getCastedRegister(0x6060))
print("Modes of Operation Display: ", C.getCastedRegister(0x6061))
print("Producer Heartbeat Time: ", C.getCastedRegister(0x1017))
print("Actual Program Position: ", C.getCastedRegister(0x3001, subindex = 3))
print("Actual Program State: ", C.getCastedRegister(0x3001, subindex = 4))
print("Error State: ", C.getCastedRegister(0x3001, subindex = 8))
print("Error code: ", | |
'p2':4790009})
create_and_post({'t':'line', 'p1':4790009, 'p2':4790011})
create_and_post({'t':'line', 'p1':4790011, 'p2':4780010})
create_and_post({'t':'line', 'p1':4790011, 'p2':4770011})
create_and_post({'t':'line', 'p1':4770011, 'p2':4760011})
create_and_post({'t':'line', 'p1':4760011, 'p2':4750011})
create_and_post({'t':'line', 'p1':4750011, 'p2':4740010})
create_and_post({'t':'line', 'p1':4670003, 'p2':4680003})
create_and_post({'t':'line', 'p1':4650005, 'p2':4650006})
create_and_post({'t':'line', 'p1':4700005, 'p2':4700006})
create_and_post({'t':'line', 'p1':4670008, 'p2':4680008})
create_and_post({'t':'line', 'p1':4850003, 'p2':4860003})
create_and_post({'t':'line', 'p1':4830005, 'p2':4830006})
create_and_post({'t':'line', 'p1':4880005, 'p2':4880006})
create_and_post({'t':'line', 'p1':4850008, 'p2':4860008})
create_and_post({'t':'line', 'p1':4800000, 'p2':4820000})
create_and_post({'t':'line', 'p1':4820000, 'p2':4830000})
create_and_post({'t':'line', 'p1':4830000, 'p2':4840000})
create_and_post({'t':'line', 'p1':4800000, 'p2':4800002})
create_and_post({'t':'line', 'p1':4800002, 'p2':4800003})
create_and_post({'t':'line', 'p1':4800003, 'p2':4800004})
create_and_post({'t':'line', 'p1':4800004, 'p2':4840004})
create_and_post({'t':'line', 'p1':4840004, 'p2':4840000})
create_and_post({'t':'line', 'p1':4840000, 'p2':4850001})
create_and_post({'t':'line', 'p1':4850001, 'p2':4850002})
create_and_post({'t':'line', 'p1':4850002, 'p2':4850003})
create_and_post({'t':'line', 'p1':4850003, 'p2':4850005})
create_and_post({'t':'line', 'p1':4850005, 'p2':4840004})
create_and_post({'t':'line', 'p1':4850005, 'p2':4830005})
create_and_post({'t':'line', 'p1':4830005, 'p2':4820005})
create_and_post({'t':'line', 'p1':4820005, 'p2':4810005})
create_and_post({'t':'line', 'p1':4810005, 'p2':4800004})
create_and_post({'t':'line', 'p1':4860000, 'p2':4880000})
create_and_post({'t':'line', 'p1':4880000, 'p2':4890000})
create_and_post({'t':'line', 'p1':4890000, 'p2':4900000})
create_and_post({'t':'line', 'p1':4860000, 'p2':4860002})
create_and_post({'t':'line', 'p1':4860002, 'p2':4860003})
create_and_post({'t':'line', 'p1':4860003, 'p2':4860004})
create_and_post({'t':'line', 'p1':4860004, 'p2':4900004})
create_and_post({'t':'line', 'p1':4900004, 'p2':4900000})
create_and_post({'t':'line', 'p1':4900000, 'p2':4910001})
create_and_post({'t':'line', 'p1':4910001, 'p2':4910002})
create_and_post({'t':'line', 'p1':4910002, 'p2':4910003})
create_and_post({'t':'line', 'p1':4910003, 'p2':4910005})
create_and_post({'t':'line', 'p1':4910005, 'p2':4900004})
create_and_post({'t':'line', 'p1':4910005, 'p2':4890005})
create_and_post({'t':'line', 'p1':4890005, 'p2':4880005})
create_and_post({'t':'line', 'p1':4880005, 'p2':4870005})
create_and_post({'t':'line', 'p1':4870005, 'p2':4860004})
create_and_post({'t':'line', 'p1':4800006, 'p2':4820006})
create_and_post({'t':'line', 'p1':4820006, 'p2':4830006})
create_and_post({'t':'line', 'p1':4830006, 'p2':4840006})
create_and_post({'t':'line', 'p1':4800006, 'p2':4800008})
create_and_post({'t':'line', 'p1':4800008, 'p2':4800009})
create_and_post({'t':'line', 'p1':4800009, 'p2':4800010})
create_and_post({'t':'line', 'p1':4800010, 'p2':4840010})
create_and_post({'t':'line', 'p1':4840010, 'p2':4840006})
create_and_post({'t':'line', 'p1':4840006, 'p2':4850007})
create_and_post({'t':'line', 'p1':4850007, 'p2':4850008})
create_and_post({'t':'line', 'p1':4850008, 'p2':4850009})
create_and_post({'t':'line', 'p1':4850009, 'p2':4850011})
create_and_post({'t':'line', 'p1':4850011, 'p2':4840010})
create_and_post({'t':'line', 'p1':4850011, 'p2':4830011})
create_and_post({'t':'line', 'p1':4830011, 'p2':4820011})
create_and_post({'t':'line', 'p1':4820011, 'p2':4810011})
create_and_post({'t':'line', 'p1':4810011, 'p2':4800010})
create_and_post({'t':'line', 'p1':4860006, 'p2':4880006})
create_and_post({'t':'line', 'p1':4880006, 'p2':4890006})
create_and_post({'t':'line', 'p1':4890006, 'p2':4900006})
create_and_post({'t':'line', 'p1':4860006, 'p2':4860008})
create_and_post({'t':'line', 'p1':4860008, 'p2':4860009})
create_and_post({'t':'line', 'p1':4860009, 'p2':4860010})
create_and_post({'t':'line', 'p1':4860010, 'p2':4900010})
create_and_post({'t':'line', 'p1':4900010, 'p2':4900006})
create_and_post({'t':'line', 'p1':4900006, 'p2':4910007})
create_and_post({'t':'line', 'p1':4910007, 'p2':4910008})
create_and_post({'t':'line', 'p1':4910008, 'p2':4910009})
create_and_post({'t':'line', 'p1':4910009, 'p2':4910011})
create_and_post({'t':'line', 'p1':4910011, 'p2':4900010})
create_and_post({'t':'line', 'p1':4910011, 'p2':4890011})
create_and_post({'t':'line', 'p1':4890011, 'p2':4880011})
create_and_post({'t':'line', 'p1':4880011, 'p2':4870011})
create_and_post({'t':'line', 'p1':4870011, 'p2':4860010})
create_and_post({'t':'line', 'p1':4790003, 'p2':4800003})
create_and_post({'t':'line', 'p1':4770005, 'p2':4770006})
create_and_post({'t':'line', 'p1':4820005, 'p2':4820006})
create_and_post({'t':'line', 'p1':4790008, 'p2':4800008})
create_and_post({'t':'line', 'p1':4970003, 'p2':4980003})
create_and_post({'t':'line', 'p1':4950005, 'p2':4950006})
create_and_post({'t':'line', 'p1':5000005, 'p2':5000006})
create_and_post({'t':'line', 'p1':4970008, 'p2':4980008})
create_and_post({'t':'line', 'p1':4920000, 'p2':4940000})
create_and_post({'t':'line', 'p1':4940000, 'p2':4950000})
create_and_post({'t':'line', 'p1':4950000, 'p2':4960000})
create_and_post({'t':'line', 'p1':4920000, 'p2':4920002})
create_and_post({'t':'line', 'p1':4920002, 'p2':4920003})
create_and_post({'t':'line', 'p1':4920003, 'p2':4920004})
create_and_post({'t':'line', 'p1':4920004, 'p2':4960004})
create_and_post({'t':'line', 'p1':4960004, 'p2':4960000})
create_and_post({'t':'line', 'p1':4960000, 'p2':4970001})
create_and_post({'t':'line', 'p1':4970001, 'p2':4970002})
create_and_post({'t':'line', 'p1':4970002, 'p2':4970003})
create_and_post({'t':'line', 'p1':4970003, 'p2':4970005})
create_and_post({'t':'line', 'p1':4970005, 'p2':4960004})
create_and_post({'t':'line', 'p1':4970005, 'p2':4950005})
create_and_post({'t':'line', 'p1':4950005, 'p2':4940005})
create_and_post({'t':'line', 'p1':4940005, 'p2':4930005})
create_and_post({'t':'line', 'p1':4930005, 'p2':4920004})
create_and_post({'t':'line', 'p1':4980000, 'p2':5000000})
create_and_post({'t':'line', 'p1':5000000, 'p2':5010000})
create_and_post({'t':'line', 'p1':5010000, 'p2':5020000})
create_and_post({'t':'line', 'p1':4980000, 'p2':4980002})
create_and_post({'t':'line', 'p1':4980002, 'p2':4980003})
create_and_post({'t':'line', 'p1':4980003, 'p2':4980004})
create_and_post({'t':'line', 'p1':4980004, 'p2':5020004})
create_and_post({'t':'line', 'p1':5020004, 'p2':5020000})
create_and_post({'t':'line', 'p1':5020000, 'p2':5030001})
create_and_post({'t':'line', 'p1':5030001, 'p2':5030002})
create_and_post({'t':'line', 'p1':5030002, 'p2':5030003})
create_and_post({'t':'line', 'p1':5030003, 'p2':5030005})
create_and_post({'t':'line', 'p1':5030005, 'p2':5020004})
create_and_post({'t':'line', 'p1':5030005, 'p2':5010005})
create_and_post({'t':'line', 'p1':5010005, 'p2':5000005})
create_and_post({'t':'line', 'p1':5000005, 'p2':4990005})
create_and_post({'t':'line', 'p1':4990005, 'p2':4980004})
create_and_post({'t':'line', 'p1':4920006, 'p2':4940006})
create_and_post({'t':'line', 'p1':4940006, 'p2':4950006})
create_and_post({'t':'line', 'p1':4950006, 'p2':4960006})
create_and_post({'t':'line', 'p1':4920006, 'p2':4920008})
create_and_post({'t':'line', 'p1':4920008, 'p2':4920009})
create_and_post({'t':'line', 'p1':4920009, 'p2':4920010})
create_and_post({'t':'line', 'p1':4920010, 'p2':4960010})
create_and_post({'t':'line', 'p1':4960010, 'p2':4960006})
create_and_post({'t':'line', 'p1':4960006, 'p2':4970007})
create_and_post({'t':'line', 'p1':4970007, 'p2':4970008})
create_and_post({'t':'line', 'p1':4970008, 'p2':4970009})
create_and_post({'t':'line', 'p1':4970009, 'p2':4970011})
create_and_post({'t':'line', 'p1':4970011, 'p2':4960010})
create_and_post({'t':'line', 'p1':4970011, 'p2':4950011})
create_and_post({'t':'line', 'p1':4950011, 'p2':4940011})
create_and_post({'t':'line', 'p1':4940011, 'p2':4930011})
create_and_post({'t':'line', 'p1':4930011, 'p2':4920010})
create_and_post({'t':'line', 'p1':4980006, 'p2':5000006})
create_and_post({'t':'line', 'p1':5000006, 'p2':5010006})
create_and_post({'t':'line', 'p1':5010006, 'p2':5020006})
create_and_post({'t':'line', 'p1':4980006, 'p2':4980008})
create_and_post({'t':'line', 'p1':4980008, 'p2':4980009})
create_and_post({'t':'line', 'p1':4980009, 'p2':4980010})
create_and_post({'t':'line', 'p1':4980010, 'p2':5020010})
create_and_post({'t':'line', 'p1':5020010, 'p2':5020006})
create_and_post({'t':'line', 'p1':5020006, 'p2':5030007})
create_and_post({'t':'line', 'p1':5030007, 'p2':5030008})
create_and_post({'t':'line', 'p1':5030008, 'p2':5030009})
create_and_post({'t':'line', 'p1':5030009, 'p2':5030011})
create_and_post({'t':'line', 'p1':5030011, 'p2':5020010})
create_and_post({'t':'line', 'p1':5030011, 'p2':5010011})
create_and_post({'t':'line', 'p1':5010011, 'p2':5000011})
create_and_post({'t':'line', 'p1':5000011, 'p2':4990011})
create_and_post({'t':'line', 'p1':4990011, 'p2':4980010})
create_and_post({'t':'line', 'p1':4910003, 'p2':4920003})
create_and_post({'t':'line', 'p1':4890005, 'p2':4890006})
create_and_post({'t':'line', 'p1':4940005, 'p2':4940006})
create_and_post({'t':'line', 'p1':4910008, 'p2':4920008})
create_and_post({'t':'line', 'p1':5090003, 'p2':5100003})
create_and_post({'t':'line', 'p1':5070005, 'p2':5070006})
create_and_post({'t':'line', 'p1':5120005, 'p2':5120006})
create_and_post({'t':'line', 'p1':5090008, 'p2':5100008})
create_and_post({'t':'line', 'p1':5040000, 'p2':5060000})
create_and_post({'t':'line', 'p1':5060000, 'p2':5070000})
create_and_post({'t':'line', 'p1':5070000, 'p2':5080000})
create_and_post({'t':'line', 'p1':5040000, 'p2':5040002})
create_and_post({'t':'line', 'p1':5040002, 'p2':5040003})
create_and_post({'t':'line', 'p1':5040003, 'p2':5040004})
create_and_post({'t':'line', 'p1':5040004, 'p2':5080004})
create_and_post({'t':'line', 'p1':5080004, 'p2':5080000})
create_and_post({'t':'line', 'p1':5080000, 'p2':5090001})
create_and_post({'t':'line', 'p1':5090001, 'p2':5090002})
create_and_post({'t':'line', 'p1':5090002, 'p2':5090003})
create_and_post({'t':'line', 'p1':5090003, 'p2':5090005})
create_and_post({'t':'line', 'p1':5090005, 'p2':5080004})
create_and_post({'t':'line', 'p1':5090005, 'p2':5070005})
create_and_post({'t':'line', 'p1':5070005, 'p2':5060005})
create_and_post({'t':'line', 'p1':5060005, 'p2':5050005})
create_and_post({'t':'line', 'p1':5050005, 'p2':5040004})
create_and_post({'t':'line', 'p1':5100000, 'p2':5120000})
create_and_post({'t':'line', 'p1':5120000, 'p2':5130000})
create_and_post({'t':'line', 'p1':5130000, 'p2':5140000})
create_and_post({'t':'line', 'p1':5100000, 'p2':5100002})
create_and_post({'t':'line', 'p1':5100002, 'p2':5100003})
create_and_post({'t':'line', 'p1':5100003, 'p2':5100004})
create_and_post({'t':'line', 'p1':5100004, 'p2':5140004})
create_and_post({'t':'line', 'p1':5140004, 'p2':5140000})
create_and_post({'t':'line', 'p1':5140000, 'p2':5150001})
create_and_post({'t':'line', 'p1':5150001, 'p2':5150002})
create_and_post({'t':'line', 'p1':5150002, 'p2':5150003})
create_and_post({'t':'line', 'p1':5150003, 'p2':5150005})
create_and_post({'t':'line', 'p1':5150005, 'p2':5140004})
create_and_post({'t':'line', 'p1':5150005, 'p2':5130005})
create_and_post({'t':'line', 'p1':5130005, 'p2':5120005})
create_and_post({'t':'line', 'p1':5120005, 'p2':5110005})
create_and_post({'t':'line', 'p1':5110005, 'p2':5100004})
create_and_post({'t':'line', 'p1':5040006, 'p2':5060006})
create_and_post({'t':'line', 'p1':5060006, 'p2':5070006})
create_and_post({'t':'line', 'p1':5070006, 'p2':5080006})
create_and_post({'t':'line', 'p1':5040006, 'p2':5040008})
create_and_post({'t':'line', 'p1':5040008, 'p2':5040009})
create_and_post({'t':'line', 'p1':5040009, 'p2':5040010})
create_and_post({'t':'line', 'p1':5040010, 'p2':5080010})
create_and_post({'t':'line', 'p1':5080010, 'p2':5080006})
create_and_post({'t':'line', 'p1':5080006, 'p2':5090007})
create_and_post({'t':'line', 'p1':5090007, 'p2':5090008})
create_and_post({'t':'line', 'p1':5090008, 'p2':5090009})
create_and_post({'t':'line', 'p1':5090009, 'p2':5090011})
create_and_post({'t':'line', 'p1':5090011, 'p2':5080010})
create_and_post({'t':'line', 'p1':5090011, 'p2':5070011})
create_and_post({'t':'line', 'p1':5070011, 'p2':5060011})
create_and_post({'t':'line', 'p1':5060011, 'p2':5050011})
create_and_post({'t':'line', 'p1':5050011, 'p2':5040010})
create_and_post({'t':'line', 'p1':5100006, 'p2':5120006})
create_and_post({'t':'line', 'p1':5120006, 'p2':5130006})
create_and_post({'t':'line', 'p1':5130006, 'p2':5140006})
create_and_post({'t':'line', 'p1':5100006, 'p2':5100008})
create_and_post({'t':'line', 'p1':5100008, 'p2':5100009})
create_and_post({'t':'line', 'p1':5100009, 'p2':5100010})
create_and_post({'t':'line', 'p1':5100010, 'p2':5140010})
create_and_post({'t':'line', 'p1':5140010, 'p2':5140006})
create_and_post({'t':'line', 'p1':5140006, 'p2':5150007})
create_and_post({'t':'line', 'p1':5150007, 'p2':5150008})
create_and_post({'t':'line', 'p1':5150008, 'p2':5150009})
create_and_post({'t':'line', 'p1':5150009, 'p2':5150011})
create_and_post({'t':'line', 'p1':5150011, 'p2':5140010})
create_and_post({'t':'line', 'p1':5150011, 'p2':5130011})
create_and_post({'t':'line', 'p1':5130011, 'p2':5120011})
create_and_post({'t':'line', 'p1':5120011, 'p2':5110011})
create_and_post({'t':'line', 'p1':5110011, 'p2':5100010})
create_and_post({'t':'line', 'p1':5030003, 'p2':5040003})
create_and_post({'t':'line', 'p1':5010005, 'p2':5010006})
create_and_post({'t':'line', 'p1':5060005, 'p2':5060006})
create_and_post({'t':'line', 'p1':5030008, 'p2':5040008})
create_and_post({'t':'line', 'p1':5210003, 'p2':5220003})
create_and_post({'t':'line', 'p1':5190005, 'p2':5190006})
create_and_post({'t':'line', 'p1':5240005, 'p2':5240006})
create_and_post({'t':'line', 'p1':5210008, 'p2':5220008})
create_and_post({'t':'line', 'p1':5160000, 'p2':5180000})
create_and_post({'t':'line', 'p1':5180000, 'p2':5190000})
create_and_post({'t':'line', 'p1':5190000, 'p2':5200000})
create_and_post({'t':'line', 'p1':5160000, 'p2':5160002})
create_and_post({'t':'line', 'p1':5160002, 'p2':5160003})
create_and_post({'t':'line', 'p1':5160003, 'p2':5160004})
create_and_post({'t':'line', 'p1':5160004, 'p2':5200004})
create_and_post({'t':'line', 'p1':5200004, 'p2':5200000})
create_and_post({'t':'line', 'p1':5200000, 'p2':5210001})
create_and_post({'t':'line', 'p1':5210001, 'p2':5210002})
create_and_post({'t':'line', 'p1':5210002, 'p2':5210003})
create_and_post({'t':'line', 'p1':5210003, 'p2':5210005})
create_and_post({'t':'line', 'p1':5210005, 'p2':5200004})
create_and_post({'t':'line', 'p1':5210005, 'p2':5190005})
create_and_post({'t':'line', 'p1':5190005, 'p2':5180005})
create_and_post({'t':'line', 'p1':5180005, 'p2':5170005})
create_and_post({'t':'line', 'p1':5170005, 'p2':5160004})
create_and_post({'t':'line', 'p1':5220000, 'p2':5240000})
create_and_post({'t':'line', 'p1':5240000, 'p2':5250000})
create_and_post({'t':'line', 'p1':5250000, 'p2':5260000})
create_and_post({'t':'line', 'p1':5220000, 'p2':5220002})
create_and_post({'t':'line', 'p1':5220002, 'p2':5220003})
create_and_post({'t':'line', 'p1':5220003, 'p2':5220004})
create_and_post({'t':'line', 'p1':5220004, 'p2':5260004})
create_and_post({'t':'line', 'p1':5260004, 'p2':5260000})
create_and_post({'t':'line', 'p1':5260000, 'p2':5270001})
create_and_post({'t':'line', 'p1':5270001, 'p2':5270002})
create_and_post({'t':'line', 'p1':5270002, 'p2':5270003})
create_and_post({'t':'line', 'p1':5270003, 'p2':5270005})
create_and_post({'t':'line', 'p1':5270005, 'p2':5260004})
create_and_post({'t':'line', 'p1':5270005, 'p2':5250005})
create_and_post({'t':'line', 'p1':5250005, 'p2':5240005})
create_and_post({'t':'line', 'p1':5240005, 'p2':5230005})
create_and_post({'t':'line', 'p1':5230005, 'p2':5220004})
create_and_post({'t':'line', 'p1':5160006, 'p2':5180006})
create_and_post({'t':'line', 'p1':5180006, 'p2':5190006})
create_and_post({'t':'line', 'p1':5190006, 'p2':5200006})
create_and_post({'t':'line', 'p1':5160006, 'p2':5160008})
create_and_post({'t':'line', 'p1':5160008, 'p2':5160009})
create_and_post({'t':'line', 'p1':5160009, 'p2':5160010})
create_and_post({'t':'line', 'p1':5160010, 'p2':5200010})
create_and_post({'t':'line', 'p1':5200010, 'p2':5200006})
create_and_post({'t':'line', 'p1':5200006, 'p2':5210007})
create_and_post({'t':'line', 'p1':5210007, 'p2':5210008})
create_and_post({'t':'line', 'p1':5210008, 'p2':5210009})
create_and_post({'t':'line', 'p1':5210009, 'p2':5210011})
create_and_post({'t':'line', 'p1':5210011, 'p2':5200010})
create_and_post({'t':'line', 'p1':5210011, 'p2':5190011})
create_and_post({'t':'line', 'p1':5190011, 'p2':5180011})
create_and_post({'t':'line', 'p1':5180011, 'p2':5170011})
create_and_post({'t':'line', 'p1':5170011, 'p2':5160010})
create_and_post({'t':'line', 'p1':5220006, 'p2':5240006})
create_and_post({'t':'line', 'p1':5240006, 'p2':5250006})
create_and_post({'t':'line', 'p1':5250006, 'p2':5260006})
create_and_post({'t':'line', 'p1':5220006, 'p2':5220008})
create_and_post({'t':'line', 'p1':5220008, 'p2':5220009})
create_and_post({'t':'line', 'p1':5220009, 'p2':5220010})
create_and_post({'t':'line', 'p1':5220010, 'p2':5260010})
create_and_post({'t':'line', 'p1':5260010, 'p2':5260006})
create_and_post({'t':'line', 'p1':5260006, 'p2':5270007})
create_and_post({'t':'line', 'p1':5270007, 'p2':5270008})
create_and_post({'t':'line', 'p1':5270008, 'p2':5270009})
create_and_post({'t':'line', 'p1':5270009, 'p2':5270011})
create_and_post({'t':'line', 'p1':5270011, 'p2':5260010})
create_and_post({'t':'line', 'p1':5270011, 'p2':5250011})
create_and_post({'t':'line', 'p1':5250011, 'p2':5240011})
create_and_post({'t':'line', 'p1':5240011, 'p2':5230011})
create_and_post({'t':'line', 'p1':5230011, 'p2':5220010})
create_and_post({'t':'line', 'p1':5150003, 'p2':5160003})
create_and_post({'t':'line', 'p1':5130005, 'p2':5130006})
create_and_post({'t':'line', 'p1':5180005, 'p2':5180006})
create_and_post({'t':'line', 'p1':5150008, 'p2':5160008})
create_and_post({'t':'line', 'p1':5330003, 'p2':5340003})
create_and_post({'t':'line', 'p1':5310005, 'p2':5310006})
create_and_post({'t':'line', 'p1':5360005, 'p2':5360006})
create_and_post({'t':'line', 'p1':5330008, 'p2':5340008})
create_and_post({'t':'line', 'p1':5280000, 'p2':5300000})
create_and_post({'t':'line', 'p1':5300000, 'p2':5310000})
create_and_post({'t':'line', 'p1':5310000, 'p2':5320000})
create_and_post({'t':'line', 'p1':5280000, 'p2':5280002})
create_and_post({'t':'line', 'p1':5280002, 'p2':5280003})
create_and_post({'t':'line', 'p1':5280003, 'p2':5280004})
create_and_post({'t':'line', 'p1':5280004, 'p2':5320004})
create_and_post({'t':'line', 'p1':5320004, 'p2':5320000})
create_and_post({'t':'line', 'p1':5320000, 'p2':5330001})
create_and_post({'t':'line', 'p1':5330001, 'p2':5330002})
create_and_post({'t':'line', 'p1':5330002, 'p2':5330003})
create_and_post({'t':'line', 'p1':5330003, 'p2':5330005})
create_and_post({'t':'line', 'p1':5330005, 'p2':5320004})
create_and_post({'t':'line', 'p1':5330005, 'p2':5310005})
create_and_post({'t':'line', 'p1':5310005, 'p2':5300005})
create_and_post({'t':'line', 'p1':5300005, 'p2':5290005})
create_and_post({'t':'line', 'p1':5290005, 'p2':5280004})
create_and_post({'t':'line', 'p1':5340000, 'p2':5360000})
create_and_post({'t':'line', 'p1':5360000, 'p2':5370000})
create_and_post({'t':'line', 'p1':5370000, 'p2':5380000})
create_and_post({'t':'line', 'p1':5340000, 'p2':5340002})
create_and_post({'t':'line', 'p1':5340002, 'p2':5340003})
create_and_post({'t':'line', 'p1':5340003, 'p2':5340004})
create_and_post({'t':'line', 'p1':5340004, 'p2':5380004})
create_and_post({'t':'line', 'p1':5380004, 'p2':5380000})
create_and_post({'t':'line', 'p1':5380000, 'p2':5390001})
create_and_post({'t':'line', 'p1':5390001, 'p2':5390002})
create_and_post({'t':'line', 'p1':5390002, 'p2':5390003})
create_and_post({'t':'line', 'p1':5390003, 'p2':5390005})
create_and_post({'t':'line', 'p1':5390005, 'p2':5380004})
create_and_post({'t':'line', 'p1':5390005, 'p2':5370005})
create_and_post({'t':'line', 'p1':5370005, 'p2':5360005})
create_and_post({'t':'line', 'p1':5360005, 'p2':5350005})
create_and_post({'t':'line', 'p1':5350005, 'p2':5340004})
create_and_post({'t':'line', 'p1':5280006, 'p2':5300006})
create_and_post({'t':'line', 'p1':5300006, 'p2':5310006})
create_and_post({'t':'line', 'p1':5310006, 'p2':5320006})
create_and_post({'t':'line', 'p1':5280006, 'p2':5280008})
create_and_post({'t':'line', 'p1':5280008, 'p2':5280009})
create_and_post({'t':'line', 'p1':5280009, 'p2':5280010})
create_and_post({'t':'line', 'p1':5280010, 'p2':5320010})
create_and_post({'t':'line', 'p1':5320010, 'p2':5320006})
create_and_post({'t':'line', 'p1':5320006, 'p2':5330007})
create_and_post({'t':'line', 'p1':5330007, 'p2':5330008})
create_and_post({'t':'line', 'p1':5330008, 'p2':5330009})
create_and_post({'t':'line', 'p1':5330009, 'p2':5330011})
create_and_post({'t':'line', 'p1':5330011, 'p2':5320010})
create_and_post({'t':'line', 'p1':5330011, 'p2':5310011})
create_and_post({'t':'line', 'p1':5310011, 'p2':5300011})
create_and_post({'t':'line', 'p1':5300011, 'p2':5290011})
create_and_post({'t':'line', 'p1':5290011, 'p2':5280010})
create_and_post({'t':'line', 'p1':5340006, 'p2':5360006})
create_and_post({'t':'line', 'p1':5360006, 'p2':5370006})
create_and_post({'t':'line', 'p1':5370006, 'p2':5380006})
create_and_post({'t':'line', 'p1':5340006, 'p2':5340008})
create_and_post({'t':'line', 'p1':5340008, 'p2':5340009})
create_and_post({'t':'line', 'p1':5340009, 'p2':5340010})
create_and_post({'t':'line', 'p1':5340010, 'p2':5380010})
create_and_post({'t':'line', 'p1':5380010, 'p2':5380006})
create_and_post({'t':'line', 'p1':5380006, 'p2':5390007})
create_and_post({'t':'line', 'p1':5390007, 'p2':5390008})
create_and_post({'t':'line', 'p1':5390008, 'p2':5390009})
create_and_post({'t':'line', 'p1':5390009, 'p2':5390011})
create_and_post({'t':'line', 'p1':5390011, 'p2':5380010})
create_and_post({'t':'line', 'p1':5390011, 'p2':5370011})
create_and_post({'t':'line', 'p1':5370011, 'p2':5360011})
create_and_post({'t':'line', 'p1':5360011, 'p2':5350011})
create_and_post({'t':'line', 'p1':5350011, 'p2':5340010})
create_and_post({'t':'line', 'p1':5270003, 'p2':5280003})
create_and_post({'t':'line', 'p1':5250005, 'p2':5250006})
create_and_post({'t':'line', 'p1':5300005, 'p2':5300006})
create_and_post({'t':'line', 'p1':5270008, 'p2':5280008})
create_and_post({'t':'line', 'p1':5450003, 'p2':5460003})
create_and_post({'t':'line', 'p1':5430005, 'p2':5430006})
create_and_post({'t':'line', 'p1':5480005, 'p2':5480006})
create_and_post({'t':'line', 'p1':5450008, 'p2':5460008})
create_and_post({'t':'line', 'p1':5400000, 'p2':5420000})
create_and_post({'t':'line', 'p1':5420000, 'p2':5430000})
create_and_post({'t':'line', 'p1':5430000, 'p2':5440000})
create_and_post({'t':'line', 'p1':5400000, 'p2':5400002})
create_and_post({'t':'line', 'p1':5400002, 'p2':5400003})
create_and_post({'t':'line', 'p1':5400003, 'p2':5400004})
create_and_post({'t':'line', 'p1':5400004, 'p2':5440004})
create_and_post({'t':'line', 'p1':5440004, 'p2':5440000})
create_and_post({'t':'line', 'p1':5440000, 'p2':5450001})
create_and_post({'t':'line', 'p1':5450001, 'p2':5450002})
create_and_post({'t':'line', 'p1':5450002, 'p2':5450003})
create_and_post({'t':'line', 'p1':5450003, 'p2':5450005})
create_and_post({'t':'line', 'p1':5450005, 'p2':5440004})
create_and_post({'t':'line', 'p1':5450005, 'p2':5430005})
create_and_post({'t':'line', 'p1':5430005, 'p2':5420005})
create_and_post({'t':'line', 'p1':5420005, 'p2':5410005})
create_and_post({'t':'line', 'p1':5410005, 'p2':5400004})
create_and_post({'t':'line', 'p1':5460000, 'p2':5480000})
create_and_post({'t':'line', 'p1':5480000, 'p2':5490000})
create_and_post({'t':'line', 'p1':5490000, 'p2':5500000})
create_and_post({'t':'line', 'p1':5460000, 'p2':5460002})
create_and_post({'t':'line', 'p1':5460002, 'p2':5460003})
create_and_post({'t':'line', 'p1':5460003, 'p2':5460004})
create_and_post({'t':'line', 'p1':5460004, 'p2':5500004})
create_and_post({'t':'line', 'p1':5500004, 'p2':5500000})
create_and_post({'t':'line', 'p1':5500000, 'p2':5510001})
create_and_post({'t':'line', 'p1':5510001, 'p2':5510002})
create_and_post({'t':'line', 'p1':5510002, 'p2':5510003})
create_and_post({'t':'line', 'p1':5510003, 'p2':5510005})
create_and_post({'t':'line', 'p1':5510005, 'p2':5500004})
create_and_post({'t':'line', 'p1':5510005, 'p2':5490005})
create_and_post({'t':'line', 'p1':5490005, 'p2':5480005})
create_and_post({'t':'line', 'p1':5480005, 'p2':5470005})
create_and_post({'t':'line', 'p1':5470005, 'p2':5460004})
create_and_post({'t':'line', 'p1':5400006, 'p2':5420006})
create_and_post({'t':'line', 'p1':5420006, 'p2':5430006})
create_and_post({'t':'line', 'p1':5430006, 'p2':5440006})
create_and_post({'t':'line', 'p1':5400006, 'p2':5400008})
create_and_post({'t':'line', 'p1':5400008, 'p2':5400009})
create_and_post({'t':'line', 'p1':5400009, 'p2':5400010})
create_and_post({'t':'line', 'p1':5400010, 'p2':5440010})
create_and_post({'t':'line', 'p1':5440010, 'p2':5440006})
create_and_post({'t':'line', 'p1':5440006, 'p2':5450007})
create_and_post({'t':'line', 'p1':5450007, 'p2':5450008})
create_and_post({'t':'line', 'p1':5450008, 'p2':5450009})
create_and_post({'t':'line', 'p1':5450009, 'p2':5450011})
create_and_post({'t':'line', 'p1':5450011, 'p2':5440010})
create_and_post({'t':'line', 'p1':5450011, 'p2':5430011})
create_and_post({'t':'line', 'p1':5430011, 'p2':5420011})
create_and_post({'t':'line', 'p1':5420011, 'p2':5410011})
create_and_post({'t':'line', 'p1':5410011, 'p2':5400010})
create_and_post({'t':'line', 'p1':5460006, 'p2':5480006})
create_and_post({'t':'line', 'p1':5480006, 'p2':5490006})
create_and_post({'t':'line', 'p1':5490006, 'p2':5500006})
create_and_post({'t':'line', 'p1':5460006, 'p2':5460008})
create_and_post({'t':'line', 'p1':5460008, 'p2':5460009})
create_and_post({'t':'line', 'p1':5460009, 'p2':5460010})
create_and_post({'t':'line', 'p1':5460010, 'p2':5500010})
create_and_post({'t':'line', 'p1':5500010, 'p2':5500006})
create_and_post({'t':'line', 'p1':5500006, 'p2':5510007})
create_and_post({'t':'line', 'p1':5510007, 'p2':5510008})
create_and_post({'t':'line', 'p1':5510008, 'p2':5510009})
create_and_post({'t':'line', 'p1':5510009, 'p2':5510011})
create_and_post({'t':'line', 'p1':5510011, 'p2':5500010})
create_and_post({'t':'line', 'p1':5510011, 'p2':5490011})
create_and_post({'t':'line', 'p1':5490011, 'p2':5480011})
create_and_post({'t':'line', 'p1':5480011, 'p2':5470011})
create_and_post({'t':'line', 'p1':5470011, 'p2':5460010})
create_and_post({'t':'line', 'p1':5390003, 'p2':5400003})
create_and_post({'t':'line', 'p1':5370005, 'p2':5370006})
create_and_post({'t':'line', 'p1':5420005, 'p2':5420006})
create_and_post({'t':'line', 'p1':5390008, 'p2':5400008})
create_and_post({'t':'line', 'p1':5570003, 'p2':5580003})
create_and_post({'t':'line', 'p1':5550005, 'p2':5550006})
create_and_post({'t':'line', 'p1':5600005, 'p2':5600006})
create_and_post({'t':'line', 'p1':5570008, 'p2':5580008})
create_and_post({'t':'line', 'p1':5520000, 'p2':5540000})
create_and_post({'t':'line', 'p1':5540000, 'p2':5550000})
create_and_post({'t':'line', 'p1':5550000, 'p2':5560000})
create_and_post({'t':'line', 'p1':5520000, 'p2':5520002})
create_and_post({'t':'line', 'p1':5520002, 'p2':5520003})
create_and_post({'t':'line', 'p1':5520003, 'p2':5520004})
create_and_post({'t':'line', 'p1':5520004, 'p2':5560004})
create_and_post({'t':'line', 'p1':5560004, 'p2':5560000})
create_and_post({'t':'line', 'p1':5560000, 'p2':5570001})
create_and_post({'t':'line', 'p1':5570001, 'p2':5570002})
create_and_post({'t':'line', 'p1':5570002, 'p2':5570003})
create_and_post({'t':'line', 'p1':5570003, 'p2':5570005})
create_and_post({'t':'line', 'p1':5570005, 'p2':5560004})
create_and_post({'t':'line', 'p1':5570005, 'p2':5550005})
create_and_post({'t':'line', 'p1':5550005, 'p2':5540005})
create_and_post({'t':'line', 'p1':5540005, 'p2':5530005})
create_and_post({'t':'line', 'p1':5530005, 'p2':5520004})
create_and_post({'t':'line', 'p1':5580000, 'p2':5600000})
create_and_post({'t':'line', 'p1':5600000, 'p2':5610000})
create_and_post({'t':'line', 'p1':5610000, 'p2':5620000})
create_and_post({'t':'line', 'p1':5580000, 'p2':5580002})
create_and_post({'t':'line', 'p1':5580002, 'p2':5580003})
create_and_post({'t':'line', 'p1':5580003, 'p2':5580004})
create_and_post({'t':'line', 'p1':5580004, 'p2':5620004})
create_and_post({'t':'line', 'p1':5620004, 'p2':5620000})
create_and_post({'t':'line', 'p1':5620000, 'p2':5630001})
create_and_post({'t':'line', 'p1':5630001, 'p2':5630002})
create_and_post({'t':'line', 'p1':5630002, 'p2':5630003})
create_and_post({'t':'line', 'p1':5630003, 'p2':5630005})
create_and_post({'t':'line', 'p1':5630005, 'p2':5620004})
create_and_post({'t':'line', 'p1':5630005, 'p2':5610005})
create_and_post({'t':'line', 'p1':5610005, 'p2':5600005})
create_and_post({'t':'line', 'p1':5600005, 'p2':5590005})
create_and_post({'t':'line', 'p1':5590005, 'p2':5580004})
create_and_post({'t':'line', 'p1':5520006, 'p2':5540006})
create_and_post({'t':'line', 'p1':5540006, 'p2':5550006})
create_and_post({'t':'line', 'p1':5550006, 'p2':5560006})
create_and_post({'t':'line', 'p1':5520006, 'p2':5520008})
create_and_post({'t':'line', 'p1':5520008, 'p2':5520009})
create_and_post({'t':'line', 'p1':5520009, 'p2':5520010})
create_and_post({'t':'line', 'p1':5520010, 'p2':5560010})
create_and_post({'t':'line', 'p1':5560010, 'p2':5560006})
create_and_post({'t':'line', 'p1':5560006, 'p2':5570007})
create_and_post({'t':'line', 'p1':5570007, 'p2':5570008})
create_and_post({'t':'line', 'p1':5570008, 'p2':5570009})
create_and_post({'t':'line', 'p1':5570009, 'p2':5570011})
create_and_post({'t':'line', 'p1':5570011, 'p2':5560010})
create_and_post({'t':'line', 'p1':5570011, 'p2':5550011})
create_and_post({'t':'line', 'p1':5550011, 'p2':5540011})
create_and_post({'t':'line', 'p1':5540011, 'p2':5530011})
create_and_post({'t':'line', 'p1':5530011, 'p2':5520010})
create_and_post({'t':'line', 'p1':5580006, 'p2':5600006})
create_and_post({'t':'line', 'p1':5600006, 'p2':5610006})
create_and_post({'t':'line', 'p1':5610006, 'p2':5620006})
create_and_post({'t':'line', 'p1':5580006, 'p2':5580008})
create_and_post({'t':'line', 'p1':5580008, 'p2':5580009})
create_and_post({'t':'line', 'p1':5580009, 'p2':5580010})
create_and_post({'t':'line', 'p1':5580010, 'p2':5620010})
create_and_post({'t':'line', 'p1':5620010, 'p2':5620006})
create_and_post({'t':'line', 'p1':5620006, 'p2':5630007})
create_and_post({'t':'line', 'p1':5630007, 'p2':5630008})
create_and_post({'t':'line', 'p1':5630008, 'p2':5630009})
create_and_post({'t':'line', 'p1':5630009, 'p2':5630011})
create_and_post({'t':'line', 'p1':5630011, 'p2':5620010})
create_and_post({'t':'line', 'p1':5630011, 'p2':5610011})
create_and_post({'t':'line', 'p1':5610011, 'p2':5600011})
create_and_post({'t':'line', 'p1':5600011, 'p2':5590011})
create_and_post({'t':'line', 'p1':5590011, 'p2':5580010})
create_and_post({'t':'line', 'p1':5510003, 'p2':5520003})
create_and_post({'t':'line', 'p1':5490005, 'p2':5490006})
create_and_post({'t':'line', 'p1':5540005, 'p2':5540006})
create_and_post({'t':'line', 'p1':5510008, 'p2':5520008})
create_and_post({'t':'line', 'p1':5690003, 'p2':5700003})
create_and_post({'t':'line', 'p1':5670005, 'p2':5670006})
create_and_post({'t':'line', 'p1':5720005, 'p2':5720006})
create_and_post({'t':'line', 'p1':5690008, 'p2':5700008})
create_and_post({'t':'line', 'p1':5640000, 'p2':5660000})
create_and_post({'t':'line', 'p1':5660000, 'p2':5670000})
create_and_post({'t':'line', 'p1':5670000, 'p2':5680000})
create_and_post({'t':'line', 'p1':5640000, 'p2':5640002})
create_and_post({'t':'line', 'p1':5640002, 'p2':5640003})
create_and_post({'t':'line', 'p1':5640003, 'p2':5640004})
create_and_post({'t':'line', 'p1':5640004, 'p2':5680004})
create_and_post({'t':'line', 'p1':5680004, 'p2':5680000})
create_and_post({'t':'line', 'p1':5680000, 'p2':5690001})
create_and_post({'t':'line', 'p1':5690001, 'p2':5690002})
create_and_post({'t':'line', 'p1':5690002, 'p2':5690003})
create_and_post({'t':'line', 'p1':5690003, 'p2':5690005})
create_and_post({'t':'line', 'p1':5690005, 'p2':5680004})
create_and_post({'t':'line', 'p1':5690005, 'p2':5670005})
create_and_post({'t':'line', 'p1':5670005, 'p2':5660005})
create_and_post({'t':'line', 'p1':5660005, 'p2':5650005})
create_and_post({'t':'line', 'p1':5650005, 'p2':5640004})
create_and_post({'t':'line', 'p1':5700000, 'p2':5720000})
create_and_post({'t':'line', 'p1':5720000, 'p2':5730000})
create_and_post({'t':'line', 'p1':5730000, 'p2':5740000})
create_and_post({'t':'line', 'p1':5700000, 'p2':5700002})
create_and_post({'t':'line', 'p1':5700002, 'p2':5700003})
create_and_post({'t':'line', 'p1':5700003, 'p2':5700004})
create_and_post({'t':'line', 'p1':5700004, 'p2':5740004})
create_and_post({'t':'line', 'p1':5740004, 'p2':5740000})
create_and_post({'t':'line', 'p1':5740000, 'p2':5750001})
create_and_post({'t':'line', 'p1':5750001, 'p2':5750002})
create_and_post({'t':'line', 'p1':5750002, 'p2':5750003})
create_and_post({'t':'line', 'p1':5750003, 'p2':5750005})
create_and_post({'t':'line', 'p1':5750005, 'p2':5740004})
create_and_post({'t':'line', 'p1':5750005, 'p2':5730005})
create_and_post({'t':'line', 'p1':5730005, 'p2':5720005})
create_and_post({'t':'line', 'p1':5720005, 'p2':5710005})
create_and_post({'t':'line', 'p1':5710005, 'p2':5700004})
create_and_post({'t':'line', 'p1':5640006, 'p2':5660006})
create_and_post({'t':'line', 'p1':5660006, 'p2':5670006})
create_and_post({'t':'line', 'p1':5670006, 'p2':5680006})
create_and_post({'t':'line', 'p1':5640006, 'p2':5640008})
create_and_post({'t':'line', 'p1':5640008, 'p2':5640009})
create_and_post({'t':'line', 'p1':5640009, 'p2':5640010})
create_and_post({'t':'line', 'p1':5640010, 'p2':5680010})
create_and_post({'t':'line', 'p1':5680010, 'p2':5680006})
create_and_post({'t':'line', 'p1':5680006, 'p2':5690007})
create_and_post({'t':'line', 'p1':5690007, 'p2':5690008})
create_and_post({'t':'line', 'p1':5690008, 'p2':5690009})
create_and_post({'t':'line', 'p1':5690009, 'p2':5690011})
create_and_post({'t':'line', 'p1':5690011, 'p2':5680010})
create_and_post({'t':'line', 'p1':5690011, 'p2':5670011})
create_and_post({'t':'line', 'p1':5670011, 'p2':5660011})
create_and_post({'t':'line', 'p1':5660011, 'p2':5650011})
create_and_post({'t':'line', 'p1':5650011, 'p2':5640010})
create_and_post({'t':'line', 'p1':5700006, 'p2':5720006})
create_and_post({'t':'line', 'p1':5720006, 'p2':5730006})
create_and_post({'t':'line', 'p1':5730006, 'p2':5740006})
create_and_post({'t':'line', 'p1':5700006, 'p2':5700008})
create_and_post({'t':'line', 'p1':5700008, 'p2':5700009})
create_and_post({'t':'line', 'p1':5700009, 'p2':5700010})
create_and_post({'t':'line', 'p1':5700010, 'p2':5740010})
create_and_post({'t':'line', 'p1':5740010, 'p2':5740006})
create_and_post({'t':'line', 'p1':5740006, 'p2':5750007})
create_and_post({'t':'line', 'p1':5750007, 'p2':5750008})
create_and_post({'t':'line', 'p1':5750008, 'p2':5750009})
create_and_post({'t':'line', 'p1':5750009, 'p2':5750011})
create_and_post({'t':'line', 'p1':5750011, 'p2':5740010})
create_and_post({'t':'line', 'p1':5750011, 'p2':5730011})
create_and_post({'t':'line', 'p1':5730011, 'p2':5720011})
create_and_post({'t':'line', 'p1':5720011, 'p2':5710011})
create_and_post({'t':'line', 'p1':5710011, 'p2':5700010})
create_and_post({'t':'line', 'p1':5630003, 'p2':5640003})
create_and_post({'t':'line', 'p1':5610005, 'p2':5610006})
create_and_post({'t':'line', 'p1':5660005, 'p2':5660006})
create_and_post({'t':'line', 'p1':5630008, 'p2':5640008})
create_and_post({'t':'line', 'p1':5810003, 'p2':5820003})
create_and_post({'t':'line', 'p1':5790005, 'p2':5790006})
create_and_post({'t':'line', 'p1':5840005, 'p2':5840006})
create_and_post({'t':'line', 'p1':5810008, 'p2':5820008})
create_and_post({'t':'line', 'p1':5760000, 'p2':5780000})
create_and_post({'t':'line', 'p1':5780000, 'p2':5790000})
create_and_post({'t':'line', 'p1':5790000, 'p2':5800000})
create_and_post({'t':'line', 'p1':5760000, 'p2':5760002})
create_and_post({'t':'line', 'p1':5760002, 'p2':5760003})
create_and_post({'t':'line', 'p1':5760003, 'p2':5760004})
create_and_post({'t':'line', 'p1':5760004, 'p2':5800004})
create_and_post({'t':'line', 'p1':5800004, 'p2':5800000})
create_and_post({'t':'line', 'p1':5800000, 'p2':5810001})
create_and_post({'t':'line', 'p1':5810001, 'p2':5810002})
create_and_post({'t':'line', 'p1':5810002, 'p2':5810003})
create_and_post({'t':'line', 'p1':5810003, 'p2':5810005})
create_and_post({'t':'line', 'p1':5810005, 'p2':5800004})
create_and_post({'t':'line', 'p1':5810005, 'p2':5790005})
create_and_post({'t':'line', 'p1':5790005, 'p2':5780005})
create_and_post({'t':'line', 'p1':5780005, 'p2':5770005})
create_and_post({'t':'line', 'p1':5770005, 'p2':5760004})
create_and_post({'t':'line', 'p1':5820000, | |
# rnginline/__init__.py -- part of the h4l/rnginline package
from __future__ import unicode_literals
import contextlib
import re
import collections
import pkgutil
import copy
import os
from os import path
import uuid
from functools import reduce
import operator
from lxml import etree
import six
from rnginline import postprocess, uri, urlhandlers
from rnginline.constants import (NSMAP, RNG_DIV_TAG, RNG_START_TAG,
RNG_DEFINE_TAG, RNG_INCLUDE_TAG,
RNG_GRAMMAR_TAG, RNG_NS)
from rnginline.exceptions import (
SchemaIncludesSelfError, NoAvailableHandlerError, ParseError,
InvalidGrammarError)
# Package version (PEP 396 style).
__version__ = "0.0.2"
# Public API; everything else in this module is an implementation detail.
__all__ = ["inline", "Inliner"]
# Section 5.4 of XLink specifies that chars other than these must be escaped
# in values of href attrs before using them as URIs:
NOT_ESCAPED = "".join(chr(x) for x in
                      # ASCII are OK
                      set(range(128)) -
                      # But not control chars
                      # NOTE(review): range(0, 31) stops at 30, so chr(31)
                      # (0x1F, a control char) is left in NOT_ESCAPED — looks
                      # like an off-by-one; confirm against XLink section 5.4
                      # before changing, as it alters escaping behaviour.
                      set(range(0, 31)) -
                      # And not these reserved chars
                      set(ord(c) for c in " <>\"{}|\\^`"))
# Matches chars which must be escaped in href attrs
NEEDS_ESCAPE_RE = re.compile("[^{0}]"
                             .format(re.escape(NOT_ESCAPED)).encode("ascii"))
# The RELAX NG schema for RELAX NG itself, loaded from package data at import
# time; used to validate the schemas this package processes.
RELAXNG_SCHEMA = etree.RelaxNG(etree.fromstring(
    pkgutil.get_data("rnginline", "relaxng.rng")))
_etree = etree  # maintain access to etree in methods w/ etree param.
def inline(src=None, etree=None, url=None, path=None, file=None, handlers=None,
           postprocessors=None, create_validator=True, base_uri=None,
           default_base_uri=None, inliner=None):
    """
    Load an XML document containing a RELAX NG schema, recursively loading
    and inlining any ``<include href="...">``/``<externalRef href="...">``
    elements to produce a single, self-contained schema document.

    Each ``href`` attribute's URL is dereferenced by one of the registered
    URL handlers; by default handlers for ``file:`` and ``pydata:`` URLs are
    available.

    Keyword Args:
        src: The schema source. May be an ``lxml.etree`` ``Element``, a URL,
            a filesystem path or a file-like object.
        etree: Explicitly provide an ``lxml.etree`` ``Element`` as the source
        url: Explicitly provide a URL as the source
        path: Explicitly provide a filesystem path as the source
        file: Explicitly provide a file-like object as the source
        handlers: An iterable of ``UrlHandler`` objects which are, in turn,
            requested to fetch each ``href`` attribute's URL. Defaults to
            the :obj:`rnginline.urlhandlers.file` and
            :py:obj:`rnginline.urlhandlers.pydata` in that order.
        base_uri: A URI to override the base URI of the schema with. Useful
            when the source doesn't have a sensible base URI, e.g. passing a
            file object like ``sys.stdin``
        postprocessors: An iterable of ``PostProcess`` objects which perform
            arbitary transformations on the inlined XML before it's returned/
            loaded as a schema. Defaults to the result of calling
            :func:`rnginline.postprocess.get_default_postprocessors`
        create_validator: If True (the default), a validator created via
            ``lxml.etree.RelaxNG()`` is returned; if False the inlined
            ``lxml`` XML document itself is returned.
        default_base_uri: The root URI which all others are resolved against.
            Defaults to ``file:<current directory>``, which allows relative
            file URLs such as ``'external.rng'`` to be found relative to the
            current working directory.
        inliner: The class to create the ``Inliner`` instance from. Defaults
            to :class:`rnginline.Inliner`.

    Returns:
        A ``lxml.etree.RelaxNG`` validator built from the fully inlined XML,
        or the XML itself, depending on ``create_validator``.

    Raises:
        RelaxngInlineError: (or subclass) is raised if the schema can't be
            loaded.
    """
    # Resolve the Inliner implementation, then delegate everything to it.
    cls = Inliner if inliner is None else inliner
    worker = cls(handlers=handlers,
                 postprocessors=postprocessors,
                 default_base_uri=default_base_uri)
    return worker.inline(src=src, etree=etree, url=url, path=path, file=file,
                         base_uri=base_uri, create_validator=create_validator)
class InlineContext(object):
    """
    Maintains state through an inlining operation to prevent infinite loops,
    and allow each unique URL to be dereferenced only once.
    """
    def __init__(self, dereferenced_urls=None, stack=None):
        # Maps url -> bytes content for every URL dereferenced so far.
        self.dereferenced_urls = (
            {} if dereferenced_urls is None else dereferenced_urls)
        # Stack of (url, token, trigger_el) triples for inlines in progress;
        # used to detect direct/indirect self-references.
        self.url_context_stack = [] if stack is None else stack

    def has_been_dereferenced(self, url):
        """Return True if url has already been fetched in this operation."""
        return url in self.dereferenced_urls

    def get_previous_dereference(self, url):
        """Return the previously fetched content for url (KeyError if none)."""
        return self.dereferenced_urls[url]

    def store_dereference_result(self, url, content):
        """Record the bytes fetched for url so it's fetched only once."""
        assert url not in self.dereferenced_urls
        assert isinstance(content, six.binary_type)
        self.dereferenced_urls[url] = content

    def url_in_context(self, url):
        """Return True if url is currently being inlined (on the stack)."""
        return any(u == url for (u, _, _) in self.url_context_stack)

    def _push_context(self, url, trigger_el):
        # Only the root document may omit the element that referenced it.
        if trigger_el is None and len(self.url_context_stack) != 0:
            raise ValueError("Only the first url can omit a trigger element")
        if self.url_in_context(url):
            # The schema includes itself, directly or indirectly.
            raise SchemaIncludesSelfError.from_context_stack(
                url, trigger_el, self.url_context_stack)
        # A unique token pairs each push with its matching pop.
        token = object()
        self.url_context_stack.append((url, token, trigger_el))
        return token

    def _pop_context(self, url, token):
        if len(self.url_context_stack) == 0:
            raise ValueError("Context stack is empty")
        head = self.url_context_stack.pop()
        if head[:2] != (url, token):
            raise ValueError("Context stack head is different from expectation"
                             ". expected: {0}, actual: {1}"
                             .format((url, token), head[:2]))

    def track(self, url, trigger_el=None):
        """
        A context manager which keeps track of inlining under the specified
        url. If an attempt is made to inline a url which is already being
        inlined, an error will be raised (as it indicates a direct or indirect
        self reference).
        """
        @contextlib.contextmanager
        def tracker(url):
            token = self._push_context(url, trigger_el)
            # BUGFIX: pop in a finally block. Previously an exception raised
            # inside the tracked body skipped _pop_context, leaving the stack
            # dirty and poisoning every later track() call on this context.
            try:
                yield
            finally:
                self._pop_context(url, token)
        return tracker(url)
class Inliner(object):
"""
Inliners merge references to external schemas into an input schema via
their ``inline()`` method.
Typically you can ignore this class and just use
:py:func:`rnginline.inline` which handles instantiating an ``Inliner`` and
calling its ``inline()`` method.
"""
def __init__(self, handlers=None, postprocessors=None,
             default_base_uri=None):
    """
    Create an Inliner with the specified Handlers, PostProcessors and
    default base URI.

    Args:
        handlers: A list of URL Handler objects to handle URLs encountered
            by ``inline()``. Defaults to the result of
            ``get_default_handlers()``.
        postprocessors: A list of PostProcess objects to apply to the fully
            inlined schema XML before it's returned by ``inline()``.
            Defaults to the result of ``get_default_postprocessors()``.
        default_base_uri: The root URI which all others are resolved
            against. Defaults to ``file:<current directory>``.

    Raises:
        ValueError: if ``default_base_uri`` is provided but is not a
            valid URI.
    """
    chosen_handlers = (self.get_default_handlers()
                       if handlers is None else handlers)
    self.handlers = list(chosen_handlers)

    chosen_postprocessors = (self.get_default_postprocessors()
                             if postprocessors is None else postprocessors)
    self.postprocessors = list(chosen_postprocessors)

    if default_base_uri is None:
        self.default_base_uri = self.get_default_default_base_uri()
        return
    if not uri.is_uri(default_base_uri):
        raise ValueError("default_base_uri is not a valid URI: {0}"
                         .format(default_base_uri))
    self.default_base_uri = default_base_uri
# Yes, this is the default's default.
def get_default_default_base_uri(self):
    """
    Get the URI to use as the default_base_uri if none is provided.

    Returns:
        A ``file:`` URI pointing at the current working directory.
    """
    # Renamed from ``dir`` to avoid shadowing the builtin of that name.
    cwd = _get_cwd()
    assert cwd.endswith("/")
    return urlhandlers.file.makeurl(cwd, abs=True)
def get_default_postprocessors(self):
    """Return the postprocessors used when none are passed to __init__.

    Overridable hook; delegates to the ``postprocess`` module.
    """
    return postprocess.get_default_postprocessors()
def get_default_handlers(self):
    """Return the URL handlers used when none are passed to __init__.

    Overridable hook; delegates to the ``urlhandlers`` module.
    """
    return urlhandlers.get_default_handlers()
def postprocess(self, grammar):
    """Run ``grammar`` through every configured postprocessor, in order,
    feeding each one's output into the next, and return the final result."""
    result = grammar
    for processor in self.postprocessors:
        result = processor.postprocess(result)
    return result
def get_handler(self, url):
    """Return the first handler that reports it can handle ``url``.

    Raises:
        NoAvailableHandlerError: when no configured handler accepts the url.
    """
    for candidate in self.handlers:
        if candidate.can_handle(url):
            return candidate
    raise NoAvailableHandlerError(
        "No handler can handle url: {0}".format(url))
def dereference_url(self, url, context):
    """Fetch the content of ``url`` — reusing the bytes cached in
    ``context`` if this URL was dereferenced earlier in the operation —
    then parse it as a RELAX NG grammar rooted at ``url``."""
    if not context.has_been_dereferenced(url):
        content = self.get_handler(url).dereference(url)
        context.store_dereference_result(url, content)
    else:
        content = context.get_previous_dereference(url)
    return self.parse_grammar_xml(content, url)
def parse_grammar_xml(self, xml_string, base_url):
    """
    Parse ``xml_string`` into an lxml element tree whose document URL is
    ``base_url``, and check that it is a RELAX NG grammar.

    Raises:
        ParseError: if the content is not well-formed XML.
        InvalidGrammarError: if the XML is not a valid RELAX NG schema
            (raised by ``validate_grammar_xml``).
    """
    try:
        xml = etree.fromstring(xml_string, base_url=base_url)
    except etree.ParseError as cause:
        err = ParseError("Unable to parse result of dereferencing "
                         "url: {0}. error: {1}".format(base_url, cause))
        six.raise_from(err, cause)
    # lxml records the base_url we passed in as the document's URL.
    assert xml.getroottree().docinfo.URL == base_url
    # Ensure the parsed XML is a relaxng grammar
    self.validate_grammar_xml(xml)
    return xml
@classmethod
def _strip_non_rng(cls, tree):
    """
    Recursively remove (in place) every element and attribute that is not
    in the RELAX NG namespace from ``tree``.

    Returns:
        The stripped tree, or ``None`` when the root element itself is not
        a RELAX NG element (in which case it is also detached from its
        parent, if it has one).
    """
    if etree.QName(tree).namespace != RNG_NS:
        # Foreign element: detach it from its parent (if any) and prune.
        if tree.getparent() is not None:
            tree.getparent().remove(tree)
        return None
    # Drop foreign-namespace attributes; None (no namespace) and RNG_NS
    # attributes are kept.
    non_rng_attrs = [key for key in tree.attrib.keys()
                     if etree.QName(key).namespace not in [None, RNG_NS]]
    for key in non_rng_attrs:
        del tree.attrib[key]
    # tag=etree.Element skips comments and processing instructions.
    # NOTE(review): recursion may remove a child while its parent's
    # iterchildren() is active — appears to work upstream, but confirm
    # against lxml's mutation-during-iteration semantics.
    for child in tree.iterchildren(tag=etree.Element):
        cls._strip_non_rng(child)
    return tree
def validate_grammar_xml(self, grammar):
    """
    Checks that grammar is an XML document matching the RELAX NG schema.

    Raises:
        InvalidGrammarError: if the root element is not a RELAX NG element,
            or if the document (with foreign nodes stripped) fails
            validation against the RELAX NG spec's schema.
    """
    url = self.get_source_url(grammar) or "??"
    msg = ("The XML document from url: {0} was not a valid RELAX NG "
           "schema: {1}")
    # libxml2's RELAX NG validator does not implement <except>, so we can't
    # validate with the RELAX NG schema which permits foreign
    # elements/attributes. We can validate against the schema provided in
    # the RELAX NG spec, which does not permit foreign elements. To do this
    # we have to manually strip foreign elements from a copy of the XML and
    # validate that...
    stripped = self._strip_non_rng(copy.deepcopy(grammar))
    if stripped is None:
        reason = "The root element is not a RELAX NG schema element."
        raise InvalidGrammarError(msg.format(url, reason))
    try:
        RELAXNG_SCHEMA.assertValid(stripped)
    except etree.DocumentInvalid as cause:
        err = InvalidGrammarError(msg.format(url, cause))
        six.raise_from(err, cause)
def get_source_url(self, xml):
    """Return the URL ``xml``'s document was loaded from (may be falsy —
    callers guard with ``or "??"``)."""
    return xml.getroottree().docinfo.URL
def create_validator(self, schema):
    """Compile ``schema`` (an lxml element tree) into an
    ``lxml.etree.RelaxNG`` validator object."""
    # This should not fail under normal circumstances as we've validated
    # our input RELAX NG schemas. However, if the libxml2 compat is
    # disabled and a buggy libxml2 version is used then this could fail.
    # It seems inappropriate to catch and rethrow such an error as our own,
    # as it's lxml (via libxml2)'s problem and the user will have
    # explicitly disabled our workaround to protect them from it.
    return etree.RelaxNG(schema)
def inline(self, src=None, etree=None, url=None, path=None, file=None,
base_uri=None, create_validator=True):
"""
Load an XML document containing a RELAX NG schema, recursively loading
and | |
than or
equal to the other instance. And this happens if all elements in the
first instance are equal or less than the opposing elements of the
second instance.
Parameters
----------
other: CircularLinkedList()
The other instance that we want to compare with the current one
Returns
-------
bool:
`True` if the first instance is less than or equal to the second
instance, and `False` otherwise.
Raises
------
TypeError:
This happens in two cases
1. If the other instance isn't an instance of
`CircularLinkedList()`.
2. In case one element in the first instance doesn't match the
type of the opposing element in the other instance.
Examples
--------
>>> cll_1 = CircularLinkedList([1, 3, 2])
>>> cll_2 = CircularLinkedList([1, 3, 3])
>>> cll_1 <= cll_2
True
>>> cll_1 = CircularLinkedList([1, 3])
>>> cll_2 = CircularLinkedList([1, 3, 3])
>>> cll_1 <= cll_2
True
>>> cll_1 = CircularLinkedList([1, 5])
>>> cll_2 = CircularLinkedList([1, 3, 3])
>>> cll_1 <= cll_2
False
>>> cll_1 = CircularLinkedList([5, 2, 1])
>>> cll_2 = CircularLinkedList([1, 3, 3])
>>> cll_1 <= cll_2
False
>>> cll_1 = CircularLinkedList([1, 2, 3])
>>> cll_2 = CircularLinkedList([1, 2, 3])
>>> cll_1 <= cll_2
True
"""
return super().__le__(other)
def __gt__(self, other):
    """
    Checks if the first `CircularLinkedList()` instance is greater than the
    other instance. This happens if, at the first position where the two
    instances differ, the element of the first instance is greater than the
    opposing element of the second instance; a longer instance whose prefix
    equals the other instance is also considered greater (see examples).

    Parameters
    ----------
    other: CircularLinkedList()
        The other instance that we want to compare with the current one.

    Returns
    -------
    bool:
        `True` if the first instance is greater than the second, and
        `False` otherwise.

    Raises
    ------
    TypeError:
        This happens in two cases
            1. If the other instance isn't an instance of
            `CircularLinkedList()`.
            2. In case one element in the first instance doesn't match the
            type of the opposing element in the other instance.

    Examples
    --------
    >>> cll_1 = CircularLinkedList([1, 3, 5])
    >>> cll_2 = CircularLinkedList([1, 3, 3])
    >>> cll_1 > cll_2
    True
    >>> cll_1 = CircularLinkedList([1, 3, 2, 1])
    >>> cll_2 = CircularLinkedList([1, 3, 2])
    >>> cll_1 > cll_2
    True
    >>> cll_1 = CircularLinkedList([1, 2])
    >>> cll_2 = CircularLinkedList([1, 3, 3])
    >>> cll_1 > cll_2
    False
    >>> cll_1 = CircularLinkedList([5, 2, 1])
    >>> cll_2 = CircularLinkedList([1, 3, 3])
    >>> cll_1 > cll_2
    False
    >>> cll_1 = CircularLinkedList([1, 2, 3])
    >>> cll_2 = CircularLinkedList([1, 2, 3])
    >>> cll_1 > cll_2
    False
    """
    # Delegate to the linked-list base class comparison.
    return super().__gt__(other)
def __ge__(self, other):
    """
    Checks if the first `CircularLinkedList()` instance is greater than or
    equal to the other instance. This happens if the two instances are
    equal, or if the first compares greater at the first position where
    they differ (see examples).

    Parameters
    ----------
    other: CircularLinkedList()
        The other instance that we want to compare with the current one.

    Returns
    -------
    bool:
        `True` if the first instance is greater than or equal to the
        second, and `False` otherwise.

    Raises
    ------
    TypeError:
        This happens in two cases
            1. If the other instance isn't an instance of
            `CircularLinkedList()`.
            2. In case one element in the first instance doesn't match the
            type of the opposing element in the other instance.

    Examples
    --------
    >>> cll_1 = CircularLinkedList([1, 3, 5])
    >>> cll_2 = CircularLinkedList([1, 3, 3])
    >>> cll_1 >= cll_2
    True
    >>> cll_1 = CircularLinkedList([1, 3, 2, 1])
    >>> cll_2 = CircularLinkedList([1, 3, 2])
    >>> cll_1 >= cll_2
    True
    >>> cll_1 = CircularLinkedList([1, 2])
    >>> cll_2 = CircularLinkedList([1, 3, 3])
    >>> cll_1 >= cll_2
    False
    >>> cll_1 = CircularLinkedList([5, 2, 1])
    >>> cll_2 = CircularLinkedList([1, 3, 3])
    >>> cll_1 >= cll_2
    False
    >>> cll_1 = CircularLinkedList([1, 2, 3])
    >>> cll_2 = CircularLinkedList([1, 2, 3])
    >>> cll_1 >= cll_2
    True
    """
    # Delegate to the linked-list base class comparison.
    return super().__ge__(other)
# ============================= SEARCH ==============================
def __contains__(self, value):
    """
    Checks if the given value exists in the `CircularLinkedList()` instance
    in time-complexity of O(n) where **n** is the total number of elements
    in the `CircularLinkedList()` instance.

    Parameters
    ----------
    value: Object
        The value to be searched for in the `CircularLinkedList()`
        instance.

    Returns
    -------
    bool:
        `True` if the given value exists in the `CircularLinkedList()`
        instance, and `False` otherwise.

    Examples
    --------
    >>> cll = CircularLinkedList([1, 3, 5])
    >>> 1 in cll
    True
    >>> 0 in cll
    False
    >>> "hello" in cll
    False
    """
    # Linear scan over the instance's own iterator.
    return any(item == value for item in self)
def _validate_index(self, idx, accept_negative=False, accept_slice=False):
    """
    Checks the validity of the given index. It raises the appropriate error
    when the index isn't valid and it returns nothing if the index is
    valid.

    Parameters
    ----------
    idx: int
        The index value.
    accept_negative: bool
        A flag to enable accepting negative indices, default `False`.
    accept_slice: bool
        A flag to enable accepting `slice` objects, default `False`.

    Raises
    ------
    TypeError:
        If the given index isn't `int`.
    IndexError:
        This happens in one of the following cases:
            1. if the given index is a `slice` object while `accept_slice`
            flag is `False`.
            2. If the given index is negative while `accept_negative` flag
            is `False`.

    Examples
    --------
    >>> cll = CircularLinkedList([1, 2, 3])
    >>> cll._validate_index('1')
    TypeError: Given index must be an integer!!
    >>> cll._validate_index(-2)
    IndexError: Negative indexing isn't supported with this functionality!!
    >>> cll._validate_index(slice(0, 2))
    IndexError: Slice indexing isn't supported with this functionality!!

    And it would return nothing if the given index if valid:

    >>> cll._validate_index(2)
    >>> cll._validate_index(-2, accept_negative=True)
    >>> cll._validate_index(slice(0, 2), accept_slice=True)
    """
    # FIX: corrected the typo "functinoality" -> "functionality" in the two
    # user-facing error messages (and in the docstring examples above).
    if isinstance(idx, slice):
        if not accept_slice:
            raise IndexError(
                "Slice indexing isn't supported with this functionality!!"
            )
    # Deliberate exact-type check (not isinstance) so `bool` values such as
    # `True`/`False` are rejected instead of being treated as 1/0.
    elif type(idx) != int:
        raise TypeError("Given index must be an integer!!")
    elif idx <= -1 and not accept_negative:
        raise IndexError(
            "Negative indexing isn't supported with this functionality!!"
        )
def __getitem__(self, idx):
    """
    Retrieves the element at the given index. The given index must be a
    zero-based `int`. This method doesn't support neither negative indexing
    nor `slice` objects. This method does that in time-complexity of O(k%n)
    where **k** is the given index and **n** is the number of elements
    found in the `CircularLinkedList()` instance.

    Parameters
    ----------
    idx: int
        The index to be used to retrieve value from `CircularLinkedList()`
        instance.

    Returns
    -------
    object:
        It returns the value stored at this the given index.

    Raises
    ------
    TypeError:
        If the given index isn't `int`.
    IndexError:
        If `CircularLinkedList()` instance is empty.

    Examples
    --------
    >>> cll = CircularLinkedList([1, 2, 3, 4, 5])
    >>> cll[0]
    1
    >>> cll[-2]
    4
    >>> cll[10]
    1

    Note
    ----
    Notice that the only case this method raises an `IndexError` is when
    the `CircularLinkedList()` instance is empty. Other than that, the
    method will keep iterating over the `CircularLinkedList()` instance
    till it reaches the given index. That's why even though the previous
    `CircularLinkedList()` instance is five-elements long, the method
    doesn't raise and `IndexError` when trying to retrieve the 10th
    element.

    NOTE(review): the ``cll[-2]`` example above conflicts with the code:
    ``_validate_index`` is called below without ``accept_negative=True``,
    so a negative index raises IndexError here — confirm which behavior
    is intended.
    """
    self._validate_index(idx)
    if self.is_empty():
        raise IndexError(f"{self.__name__} is empty!!")
    # Wrap the index so out-of-range indices cycle around the list.
    idx = idx % self._length if self._length != 0 else 0
    return super().__getitem__(idx)
# ============================= INSERT ==============================
def _insert_node(self, prev_node, new_node):
    """
    Inserts a `new_node` at a position defined by the given `prev_node`.

    Parameters
    ----------
    prev_node: Node()
        A reference to the node next to which a new node should be
        inserted. `None` means insert at the head position.
    new_node: Node()
        A reference to the new node to be inserted.

    Returns
    -------
    Node():
        A reference to the new node after being inserted in the
        `CircularLinkedList()` instance.

    Raises
    ------
    AssertionError:
        This happens in one of the following cases:
            1. The `prev_node` isn't a `Node()` object or `None`.
            2. The `new_node` isn't a `Node()` object

    Example
    -------
    >>> cll = CircularLinkedList([1, 2, 3])
    >>> new_node = Node(10)
    >>> cll._insert_node(cll._head, new_node)
    Node(data: 10, next: 2)
    """
    assert prev_node is None or isinstance(prev_node, self._basic_node)
    assert isinstance(new_node, self._basic_node)
    # start inserting the node
    if self._length == 0:
        # Empty list: the single node points at itself to stay circular.
        new_node.set_next(new_node)
        self._head = new_node
    elif prev_node is None:
        # Head insertion: instead of re-linking the tail to a new head,
        # insert after the current head and swap the payloads so the
        # old head node now carries the new value.
        new_node.set_next(self._head.get_next())
        self._head.set_next(new_node)
        # swap data between new_node and self._head
        new_node._data, self._head._data = self._head._data, new_node._data
        new_node = self._head  # to be returned
    else:
        # General case: splice the node in right after prev_node.
        new_node.set_next(prev_node.get_next())
        prev_node.set_next(new_node)
    self._length += 1
    return new_node
def add_front(self, item):
"""
| |
#!/usr/bin/python
# Copyright (c) [2012-], <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import defaultdict
from NGramStack import NGramStack
from math import log
import re
from pprint import pprint
class KNSmoother( ):
"""
Stand-alone python implementation of interpolated Fixed Kneser-Ney discounting.
Intended for educational purposes, this should produce results identical
to mitlm's 'estimate-ngram' utility,
mitlm:
$ estimate-ngram -o 3 -t train.corpus -s FixKN
SimpleKN.py:
$ SimpleKN.py -t train.corpus
WARNING: This program has not been optimized in any way and will almost
surely be extremely slow for anything larger than a small toy corpus.
"""
def __init__( self, order=3, sb="<s>", se="</s>" ):
    """
    Initialize the smoother.

    order: maximum N-gram order to model.
    sb/se: sentence-begin / sentence-end tokens.
    """
    self.sb = sb
    self.se = se
    self.order = order
    self.ngrams = NGramStack(order=order)
    # Per-order KN statistics; index 0 corresponds to bigrams.
    self.denominators = [ defaultdict(float) for i in xrange(order-1) ]
    self.numerators = [ defaultdict(float) for i in xrange(order-1) ]
    # Number of distinct continuations observed for each history.
    self.nonZeros = [ defaultdict(float) for i in xrange(order-1) ]
    # Counts-of-counts rows (only counts 1..4 are needed for discounts).
    self.CoC = [ [ 0.0 for j in xrange(4) ] for i in xrange(order) ]
    self.discounts = [ 0.0 for i in xrange(order-1) ]
    # Unigram denominator (UD) and numerator counts (UN).
    self.UD = 0.
    self.UN = defaultdict(float)
def _compute_counts_of_counts( self ):
    """
    Compute counts-of-counts (CoC) for each N-gram order.
    Only CoC<=4 are relevant to the computation of
    either ModKNFix or KNFix.
    """
    # Unigram row: CoC[0][c-1] = number of types seen exactly c times.
    for k in self.UN:
        if self.UN[k] <= 4:
            self.CoC[0][int(self.UN[k]-1)] += 1.
    # Higher-order rows, one per N-gram order (numerators[i] = order i+2).
    for i,dic in enumerate(self.numerators):
        for k in dic:
            if dic[k]<=4:
                self.CoC[i+1][int(dic[k]-1)] += 1.
    return
def _compute_discounts( self ):
    """
    Compute the discount parameters. Note that unigram counts
    are not discounted in either FixKN or FixModKN.

    ---------------------------------
    Fixed Kneser-Ney smoothing: FixKN
    ---------------------------------
    This is based on the solution described in Kneser-Ney '95,
    and reformulated in Chen&Goodman '98.

        D = N_1 / ( N_1 + 2(N_2) )

    where N_1 refers to the # of N-grams that appear exactly
    once, and N_2 refers to the number of N-grams that appear
    exactly twice. This is computed for each order.

    NOTE: The discount formula for FixKN is identical
    for Absolute discounting.
    """
    #Uniform discount for each N-gram order
    for o in xrange(self.order-1):
        # CoC[o+1][0] == N_1 and CoC[o+1][1] == N_2 for this order.
        self.discounts[o] = self.CoC[o+1][0] / (self.CoC[o+1][0]+2*self.CoC[o+1][1])
    return
def _get_discount( self, order, ngram ):
    """
    Retrieve the pre-computed discount for this N-gram.

    ``ngram`` is unused here: FixKN stores a single fixed discount per
    order (a ModKN variant would select the discount by count).
    """
    return self.discounts[order]
def kneser_ney_from_counts( self, arpa_file ):
    """
    Train the KN-discount language model from an ARPA format
    file containing raw count data. This can be generated with,

    $ ./SimpleCount.py --train train.corpus -r > counts.arpa

    Lower-order numerators/denominators are continuation counts derived
    from the order above; raw counts are used for the highest order and
    — presumably — for histories beginning with the sentence-begin token
    (which can never occur as a continuation). TODO confirm against the
    upstream SimpleKN.py.
    """
    # NOTE(review): the two consecutive identical `len(ngram)==2` tests
    # below look redundant, and the nesting here was reconstructed from a
    # whitespace-mangled source — verify against the upstream original.
    # NOTE(review): m_ord and c_ord are never used below.
    m_ord = c_ord = 0
    for line in open(arpa_file, "r"):
        # Each line is "<ngram>\t<count>".
        ngram, count = line.strip().split("\t")
        count = float(count)
        ngram = ngram.split(" ")
        if len(ngram)==2:
            self.UD += 1.0
        if len(ngram)==2:
            # Bigrams: unigram continuation counts and bigram histories.
            self.UN[" ".join(ngram[1:])] += 1.0
            self.nonZeros[len(ngram)-2][" ".join(ngram[:-1])] += 1.0
            if ngram[0]==self.sb:
                self.numerators[len(ngram)-2][" ".join(ngram)] += count
                self.denominators[len(ngram)-2][" ".join(ngram[:-1])] += count
        if len(ngram)>2 and len(ngram)<self.order:
            # Middle orders: feed continuation counts to the order below.
            self.numerators[len(ngram)-3][" ".join(ngram[1:])] += 1.0
            self.denominators[len(ngram)-3][" ".join(ngram[1:-1])] += 1.0
            self.nonZeros[len(ngram)-2][" ".join(ngram[:-1])] += 1.0
            if ngram[0]==self.sb:
                self.numerators[len(ngram)-2][" ".join(ngram)] += count
                self.denominators[len(ngram)-2][" ".join(ngram[:-1])] += count
        if len(ngram)==self.order:
            # Highest order: raw counts, plus continuation counts for the
            # order below.
            self.numerators[len(ngram)-3][" ".join(ngram[1:])] += 1.0
            self.numerators[len(ngram)-2][" ".join(ngram)] = count
            self.denominators[len(ngram)-3][" ".join(ngram[1:-1])] += 1.0
            self.denominators[len(ngram)-2][" ".join(ngram[:-1])] += count
            self.nonZeros[len(ngram)-2][" ".join(ngram[:-1])] += 1.0
    self._compute_counts_of_counts ( )
    self._compute_discounts( )
    #self._print_raw_counts( )
    return
def kneser_ney_discounting( self, training_file ):
    """
    Iterate through the training data using a FIFO stack or
    'window' of max-length equal to the specified N-gram order.

    Each time a new word is pushed onto the N-gram stack call
    the _kn_recurse() subroutine to increment the N-gram
    contexts in the current window / on the stack.

    If pushing a word onto the stack makes len(stack)>max-order,
    then the word at the bottom (stack[0]) is popped off.

    training_file: path to a whitespace-tokenized training corpus,
        one sentence per line.
    """
    for line in open(training_file,"r"):
        #Split the current line into words.
        words = re.split(r"\s+",line.strip())
        #Push a sentence-begin token onto the stack
        self.ngrams.push(self.sb)
        for word in words:
            #Get the current 'window' of N-grams
            ngram = self.ngrams.push(word)
            #Now count all N-grams in the current window
            #These will be of span <= self.order
            self._kn_recurse( ngram, len(ngram)-2 )
        #Now push the sentence-end token onto the stack
        ngram = self.ngrams.push(self.se)
        self._kn_recurse( ngram, len(ngram)-2 )
        #Clear the stack for the next sentence
        self.ngrams.clear()
    self._compute_counts_of_counts ( )
    self._compute_discounts( )
    #self._print_raw_counts( )
    return
def _print_raw_counts( self ):
    """
    Convenience function for sanity checking the history counts.
    Dumps unigram and per-order numerator, denominator and non-zero
    (continuation) counts to stdout (Python 2 print statements).
    """
    print "NUMERATORS:"
    for key in sorted(self.UN.iterkeys()):
        print " ", key, self.UN[key]
    for o in xrange(len(self.numerators)):
        print "ORD",o
        for key in sorted(self.numerators[o].iterkeys()):
            print " ", key, self.numerators[o][key]
    print "DENOMINATORS:"
    print self.UD
    for o in xrange(len(self.denominators)):
        print "DORD", o
        for key in sorted(self.denominators[o].iterkeys()):
            print " ", key, self.denominators[o][key]
    print "NONZEROS:"
    for o in xrange(len(self.nonZeros)):
        print "ZORD", o
        for key in sorted(self.nonZeros[o].iterkeys()):
            print " ", key, self.nonZeros[o][key]
def _kn_recurse( self, ngram_stack, i ):
    """
    Kneser-Ney discount calculation recursion.

    Counts the suffix N-gram of length i+2 from ``ngram_stack``, then
    recurses one order lower; unigram numerator/denominator counts are
    updated at the bottom of the recursion.
    """
    if i==-1 and ngram_stack[0]==self.sb:
        # A lone <s> is not a countable event.
        return
    o = len(ngram_stack)
    # Last (i+2) tokens form the N-gram; the history drops the final token.
    numer = " ".join(ngram_stack[o-(i+2):])
    denom = " ".join(ngram_stack[o-(i+2):o-1])
    self.numerators[ i][numer] += 1.
    self.denominators[i][denom] += 1.
    if self.numerators[i][numer]==1.:
        # First occurrence of this N-gram: one more distinct continuation
        # for its history.
        self.nonZeros[i][denom] += 1.
    if i>0:
        self._kn_recurse( ngram_stack, i-1 )
    else:
        #The <s> (sentence-begin) token is
        # NOT counted as a unigram event
        if not ngram_stack[-1]==self.sb:
            self.UN[ngram_stack[-1]] += 1.
            self.UD += 1.
    return
def print_ARPA( self ):
    """
    Print the interpolated Kneser-Ney LM out in ARPA format,
    computing the interpolated probabilities and back-off
    weights for each N-gram on-demand. The format:
    ----------------------------
    \\data\\
    ngram 1=NUM_1GRAMS
    ngram 2=NUM_2GRAMS
    ...
    ngram N=NUM_NGRAMS (max order)

    \\1-grams:
    p(a_z) a_z bow(a_z)
    ...
    \\2-grams:
    p(a_z) a_z bow(a_z)
    ...
    \\N-grams:
    p(a_z) a_z
    ...
    \\end\\
    ----------------------------
    """
    #Handle the header info
    print "\\data\\"
    print "ngram 1=%d" % (len(self.UN)+1)
    for o in xrange(0,self.order-1):
        print "ngram %d=%d" % (o+2,len(self.numerators[o]) )
    #Handle the Unigrams
    print "\n\\1-grams:"
    d = self.discounts[0]
    #KN discount
    # <s> gets a dummy -99 log-prob; only its back-off weight matters.
    lmda = self.nonZeros[0][self.sb] * d / self.denominators[0][self.sb]
    print "-99.00000\t%s\t%0.6f" % ( self.sb, log(lmda, 10.) )
    for key in sorted(self.UN.iterkeys()):
        if key==self.se:
            # </s> ends a sentence, so it carries no back-off weight.
            print "%0.6f\t%s\t-99" % ( log(self.UN[key]/self.UD, 10.), key )
            continue
        d = self.discounts[0]
        #KN discount
        lmda = self.nonZeros[0][key] * d / self.denominators[0][key]
        print "%0.6f\t%s\t%0.6f" % ( log(self.UN[key]/self.UD, 10.), key, log(lmda, 10.) )
    #Handle the middle-order N-grams
    for o in xrange(0,self.order-2):
        print "\n\\%d-grams:" % (o+2)
        for key in sorted(self.numerators[o].iterkeys()):
            if key.endswith(self.se):
                #No back-off prob for N-grams ending in </s>
                prob = self._compute_interpolated_prob( key )
                print "%0.6f\t%s" % ( log(prob, 10.), key )
                continue
            d = self.discounts[o+1]
            #Compute the back-off weight
            #KN discount
            lmda = self.nonZeros[o+1][key] * d / self.denominators[o+1][key]
            #Compute the interpolated N-gram probability
            prob = self._compute_interpolated_prob( key )
            print "%0.6f\t%s\t%0.6f" % ( log(prob, 10.), key, log(lmda, 10.))
    #Handle the N-order N-grams
    print "\n\\%d-grams:" % (self.order)
    for key in sorted(self.numerators[self.order-2].iterkeys()):
        #Compute the interpolated N-gram probability
        prob = self._compute_interpolated_prob( key )
        print "%0.6f\t%s" % ( log(prob, 10.), key )
    print "\n\\end\\"
    return
def _compute_interpolated_prob( self, ngram ):
"""
Compute the interpolated probability for the input ngram.
Cribbing the notation from the SRILM webpages,
a_z = An N-gram where a is the first word, z is the
last word, and "_" represents 0 | |
<gh_stars>1000+
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: <NAME> (<EMAIL>)
###############################################################################
from configure_data import configure_data
def add_general_args(parser):
    """Register general-purpose training options on ``parser``.

    Adds a 'general' argument group covering model selection, optimizer
    settings, checkpointing, fp16 and distributed-training flags.

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group('general', 'general purpose arguments')
    group.add_argument('--model', type=str, default='mLSTM',
                       help='type of recurrent net (RNNTanh, RNNReLU, LSTM, mLSTM, GRU)')
    group.add_argument('--lr', type=float, default=5e-4,
                       help='initial learning rate')
    group.add_argument('--constant-decay', type=int, default=None,
                       help='number of iterations to decay LR over,' + \
                            ' None means decay to zero over training')
    group.add_argument('--clip', type=float, default=0,
                       help='gradient clipping')
    group.add_argument('--epochs', type=int, default=1,
                       help='upper epoch limit')
    group.add_argument('--tied', action='store_true',
                       help='tie the word embedding and softmax weights')
    group.add_argument('--seed', type=int, default=1234,
                       help='random seed')
    group.add_argument('--log-interval', type=int, default=100, metavar='N',
                       help='report interval')
    group.add_argument('--save', type=str, default='lang_model.pt',
                       help='path to save the final model')
    group.add_argument('--load', type=str, default=None,
                       help='path to a previously saved model checkpoint')
    group.add_argument('--load-optim', action='store_true',
                       help='load most recent optimizer to resume training')
    group.add_argument('--save-iters', type=int, default=10000, metavar='N',
                       help='save current model progress interval')
    group.add_argument('--save-optim', action='store_true',
                       help='save most recent optimizer')
    group.add_argument('--fp16', action='store_true',
                       help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
    # FIX: typo "convergance" -> "convergence".
    group.add_argument('--dynamic-loss-scale', action='store_true',
                       help='Dynamically look for loss scalar for fp16 convergence help.')
    # FIX: help text said "Add weight normalization to model." which
    # contradicts the flag name (--no-weight-norm, store_true).
    group.add_argument('--no-weight-norm', action='store_true',
                       help='Disable weight normalization in the model.')
    group.add_argument('--loss-scale', type=float, default=1,
                       help='Static loss scaling, positive power of 2 values can improve fp16 convergence.')
    group.add_argument('--world-size', type=int, default=1,
                       help='number of distributed workers')
    group.add_argument('--distributed-backend', default='gloo',
                       help='which backend to use for distributed training. One of [gloo, nccl]')
    group.add_argument('--rank', type=int, default=-1,
                       help='distributed worker rank. Typically set automatically from multiproc.py')
    group.add_argument('--optim', default='Adam',
                       help='One of PyTorch\'s optimizers (Adam, SGD, etc). Default: Adam')
    group.add_argument('--chkpt-grad', action='store_true',
                       help='checkpoint gradients to allow for training with larger models and sequences')
    group.add_argument('--multinode-init', action='store_true',
                       help='initialize multinode. Environment variables should be set as according to https://pytorch.org/docs/stable/distributed.html')
    return parser
def add_unsupervised_data_args(parser):
    """Register unsupervised (left-to-right language modeling) data options.

    Configures the shared data plumbing via ``configure_data`` and adds
    sequence-length / iteration-count / LR-schedule options.

    Returns (data_config, parser).
    """
    data_config, data_group = configure_data(parser)
    # Set unsupervised L2R language modeling option defaults
    data_config.set_defaults(data_set_type='L2R', transpose=True)
    data_group.set_defaults(split='100,1,1')
    # Create unsupervised-L2R-specific options
    group = parser.add_argument_group('language modeling data options')
    group.add_argument('--seq-length', type=int, default=256,
                       help="Maximum sequence length to process (for unsupervised rec)")
    group.add_argument('--eval-seq-length', type=int, default=256,
                       help="Maximum sequence length to process for evaluation")
    group.add_argument('--lazy', action='store_true',
                       help='whether to lazy evaluate the data set')
    group.add_argument('--persist-state', type=int, default=1,
                       help='0=reset state after every sample in a shard, 1=reset state after every shard, -1=never reset state')
    group.add_argument('--train-iters', type=int, default=1000,
                       help="""number of iterations per epoch to run training for""")
    group.add_argument('--eval-iters', type=int, default=100,
                       help="""number of iterations per epoch to run validation/test for""")
    group.add_argument('--decay-style', type=str, default=None, choices=['constant', 'linear', 'cosine', 'exponential'],
                       help='one of constant(None), linear, cosine, or exponential')
    group.add_argument('--stlr-cut-frac', type=float, default=None,
                       help='what proportion of iterations to peak the slanted triangular learning rate')
    # FIX: the bare '%' in this help string made argparse's help
    # interpolation ("help % params") raise ValueError whenever --help was
    # printed; a literal percent sign must be written as '%%'.
    group.add_argument('--warmup', type=float, default=0,
                       help='percentage of data to warmup on (.03 = 3%% of all training iters). Default 0')
    return data_config, parser
def add_model_args(parser):
    """Dispatch to the model-specific argument builder.

    Peeks at the already-registered ``--model`` option (via
    ``parse_known_args``) and adds transformer or recurrent arguments
    accordingly. Returns the parser.
    """
    known, _ = parser.parse_known_args()
    if known.model.lower() == 'transformer':
        return add_transformer_args(parser)
    return add_recurrent_args(parser)
def add_recurrent_args(parser):
    """Register options for building recurrent nets (RNN/LSTM/mLSTM).

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(
        'recurrent', 'arguments for building recurrent nets')
    add = group.add_argument
    add('--num-hidden-warmup', type=int, default=0,
        help='number of times to conduct hidden state warmup passes through inputs to be used for transfer tasks')
    add('--emsize', type=int, default=64,
        help='size of word embeddings')
    add('--nhid', type=int, default=4096,
        help='number of hidden units per layer')
    add('--nlayers', type=int, default=1,
        help='number of layers')
    add('--dropout', type=float, default=0.0,
        help='dropout applied to layers (0 = no dropout)')
    add('--neural-alphabet', action='store_true',
        help='whether to use the neural alphabet encoder structure')
    add('--alphabet-size', type=int, default=128,
        help='number of letters in neural alphabet')
    add('--ncontext', type=int, default=2,
        help='number of context characters used in neural alphabet encoder structure')
    add('--residuals', action='store_true',
        help='whether to implement residual connections between stackedRNN layers')
    return parser
def add_transformer_args(parser):
    """Register options for building a transformer network.

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(
        'transformer', 'args for specifically building a transformer network')
    add = group.add_argument
    add('--dropout', type=float, default=0.1,
        help='dropout probability -- transformer only')
    add('--attention-dropout', type=float, default=0.0,
        help='dropout probability for attention weights -- transformer only')
    add('--relu-dropout', type=float, default=0.1,
        help='dropout probability after ReLU in FFN -- transformer only')
    # The encoder options below are only meaningful for a seq2seq
    # transformer; embedding sizes are scaled down for char-level input
    # (64 / 256 instead of the original 512 / 2048).
    add('--encoder-embed-path', type=str, default=None,
        help='path to pre-trained encoder embedding')
    add('--encoder-embed-dim', type=int, default=64,
        help='encoder embedding dimension')
    add('--encoder-ffn-embed-dim', type=int, default=256,
        help='encoder embedding dimension for FFN')
    add('--encoder-layers', type=int, default=6,
        help='num encoder layers')
    add('--encoder-attention-heads', type=int, default=8,
        help='num encoder attention heads')
    add('--encoder-normalize-before', default=False, action='store_true',
        help='apply layernorm before each encoder block')
    add('--encoder-learned-pos', default=False, action='store_true',
        help='use learned positional embeddings in the encoder')
    add('--decoder-embed-path', type=str, default=None,
        help='path to pre-trained decoder embedding')
    add('--decoder-embed-dim', type=int, default=64,
        help='decoder embedding dimension')
    add('--decoder-ffn-embed-dim', type=int, default=256,
        help='decoder embedding dimension for FFN')
    add('--decoder-layers', type=int, default=6,
        help='num decoder layers')
    add('--decoder-attention-heads', type=int, default=8,
        help='num decoder attention heads')
    add('--decoder-learned-pos', default=False, action='store_true',
        help='use learned positional embeddings in the decoder')
    add('--decoder-normalize-before', default=False, action='store_true',
        help='apply layernorm before each decoder block')
    add('--share-decoder-input-output-embed', default=False, action='store_true',
        help='share decoder input and output embeddings')
    add('--share-all-embeddings', default=False, action='store_true',
        help='share encoder, decoder and output embeddings'
             ' (requires shared dictionary and embed dim)')
    add('--use-final-embed', action='store_true',
        help='whether to use the final timestep embeddings as output of transformer (in classification)')
    return parser
def add_classifier_model_args(parser):
    """Add options for training a classifier on top of a language model.

    All flags are registered in a "classifier" argument group; the parser
    is returned so calls can be chained.
    """
    group = parser.add_argument_group('classifier', 'arguments used in training a classifier on top of a language model')
    group.add_argument('--max-seq-len', type=int, default=None,
                       help='maximum sequence length to use for classification. Transformer uses a lot of memory and needs shorter sequences.')
    group.add_argument('--classifier-hidden-layers', default=None, nargs='+',
                       help='sizes of hidden layers for binary classifier on top of language model, so excluding the input layer and final "1"')
    group.add_argument('--classifier-hidden-activation', type=str, default='PReLU',
                       help='[defaults to PReLU] activations used in hidden layers of MLP classifier (ReLU, Tanh, torch.nn module names)')
    group.add_argument('--classifier-dropout', type=float, default=0.1,
                       help='Dropout in layers of MLP classifier')
    group.add_argument('--all-layers', action='store_true',
                       help='if more than one layer is used, extract features from all layers, not just the last layer')
    group.add_argument('--concat-max', action='store_true',
                       help='whether to concatenate max pools onto cell/hidden states of RNNFeaturizer')
    group.add_argument('--concat-min', action='store_true',
                       help='whether to concatenate min pools onto cell/hidden states of RNNFeaturizer')
    group.add_argument('--concat-mean', action='store_true',
                       help='whether to concatenate mean pools onto cell/hidden states of RNNFeaturizer')
    group.add_argument('--get-hidden', action='store_true',
                       help='whether to use the hidden state (as opposed to cell state) as features for classifier')
    # fix: help text said "nenurons"
    group.add_argument('--neurons', default=1, type=int,
                       help='number of neurons to extract as features')
    group.add_argument('--heads-per-class', type=int, default=1,
                       help='set > 1 for multiple heads per class prediction (variance, regularlization)')
    # fix: was added via `parser.add_argument`, inconsistent with every other
    # option in this function (parsing is identical, help grouping was not)
    group.add_argument('--use-softmax', action='store_true', help='use softmax for classification')
    group.add_argument('--double-thresh', action='store_true',
                       help='whether to report all metrics at once')
    group.add_argument('--dual-thresh', action='store_true',
                       help='for 2 columns positive and negative, thresholds classes s.t. positive, negative, neutral labels are available')
    group.add_argument('--joint-binary-train', action='store_true',
                       help='Train with dual thresholded (positive/negative/neutral) classes and other normal binary classes.\
                       Arguments to non-binary-cols must be passed with positive negative classes first.\
                       Ex: `--non-binary-cols positive negative <other classes>`')
    group.set_defaults(epochs=5)
    return parser
def add_sentiment_transfer_args(parser):
    """Register data defaults and options for the sentiment_transfer script.

    Returns (data_config, data_group, group, parser).
    """
    data_config, data_group = configure_data(parser)
    # Transfer-learning defaults: binary SST train/val/test splits.
    data_group.set_defaults(
        split='1.',
        data=['data/binary_sst/train.csv'],
        valid=['data/binary_sst/val.csv'],
        test=['data/binary_sst/test.csv'],
    )
    group = parser.add_argument_group('sentiment_transfer', 'arguments used for sentiment_transfer script')
    group.add_argument('--mcc', action='store_true',
        help='whether to use the matthews correlation coefficient as a measure of accuracy (for CoLA)')
    group.add_argument('--save-results', type=str, default='sentiment',
        help='path to save intermediate and final results of transfer')
    group.add_argument('--no-test-eval', action='store_true',
        help='whether to not evaluate the test model (useful when your test set has no labels)')
    group.add_argument('--write-results', type=str, default='',
        help='write results of model on test (or train if none is specified) data to specified filepath ')
    group.add_argument('--use-cached', action='store_true',
        help='reuse cached featurizations from a previous run')
    group.add_argument('--drop-neurons', action='store_true',
        help='drop top neurons instead of keeping them')
    return data_config, data_group, group, parser
def add_run_classifier_args(parser):
    """Register data defaults and options for the run-classifier script.

    Returns (data_config, data_group, group, parser).
    """
    data_config, data_group = configure_data(parser)
    # Set classification data option defaults
    data_group.set_defaults(split='1.', data=['data/binary_sst/train.csv'])
    data_group.set_defaults(shuffle=False)
    # Create classification-specific options
    group = parser.add_argument_group('run_classifier', 'arguments used for run classifier script')
    # NOTE(review): '--save_probs' uses an underscore while sibling options use
    # dashes; kept as-is because CLI invocations depend on the exact spelling.
    group.add_argument('--save_probs', type=str, default='clf_results.npy',
                       help='path to save numpy of predicted probabilities')
    group.add_argument('--write-results', type=str, default='',
                       help='path to location for CSV -- write results of model on data \
                            input strings + results and variances. Will not write if empty')
    return data_config, data_group, group, parser
def add_finetune_classifier_args(parser):
data_config, data_group = configure_data(parser)
# Set finetuning data option defaults
data_group.set_defaults(split='1.', data=['data/binary_sst/train.csv'])
data_group.set_defaults(valid=['data/binary_sst/val.csv'], test=['data/binary_sst/test.csv'])
data_group.set_defaults(shuffle=True)
# Create finetuning-specific options
parser.set_defaults(get_hidden=True)
data_group.add_argument('--seq-length', type=int, default=256,
help="Maximum sequence length to process (for unsupervised rec)")
data_group.add_argument('--lazy', action='store_true',
help='whether to lazy evaluate the data set')
group = parser.add_argument_group('finetune_classifier', 'arguments used for finetune script')
group.add_argument('--use-logreg', action='store_true',
help='use scikitlearn logistic regression instead of finetuning whole classifier')
group.add_argument('--stlr-cut-frac', type=float, default=None,
help='what proportion of iterations to peak the slanted triangular learning rate')
group.add_argument('--cos-cut-frac', type=float, default=None,
help='what proportion of iterations to peak the cosine learning rate')
group.add_argument('--lr-decay', type=float, | |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with Cloud Logging via JSON-over-HTTP."""
import functools
from google.api_core import page_iterator
from google.cloud import _http
from google.cloud.logging_v2 import __version__
from google.cloud.logging_v2._helpers import entry_from_resource
from google.cloud.logging_v2.sink import Sink
from google.cloud.logging_v2.metric import Metric
class Connection(_http.JSONConnection):
    """JSON-over-HTTP connection to the Cloud Logging v2 REST API."""

    # Public endpoint used unless the caller overrides ``api_endpoint``.
    DEFAULT_API_ENDPOINT = "https://logging.googleapis.com"
    def __init__(self, client, *, client_info=None, api_endpoint=DEFAULT_API_ENDPOINT):
        """A connection to Google Cloud Logging via the JSON REST API.
        Args:
            client (google.cloud.logging_v2.client.Client):
                The client that owns the current connection.
            client_info (Optional[google.api_core.client_info.ClientInfo]):
                Instance used to generate user agent.
            api_endpoint (Optional[str]):
                Base URL of the API endpoint. Defaults to the public
                Cloud Logging endpoint.
        """
        super(Connection, self).__init__(client, client_info)
        self.API_BASE_URL = api_endpoint
        # Report this library's version in the user agent for both transports.
        self._client_info.gapic_version = __version__
        self._client_info.client_library_version = __version__
    API_VERSION = "v2"
    """The version of the API, used in building the API call's URL."""
    API_URL_TEMPLATE = "{api_base_url}/{api_version}{path}"
    """A template for the URL of a particular API call."""
class _LoggingAPI(object):
    """Helper mapping logging-related APIs.
    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """
    def __init__(self, client):
        self._client = client
        # Shortcut to the connection's request method, used by every helper.
        self.api_request = client._connection.api_request
    def list_entries(
        self,
        resource_names,
        *,
        filter_=None,
        order_by=None,
        max_results=None,
        page_size=None,
        page_token=None,
    ):
        """Return a page of log entry resources.
        Args:
            resource_names (Sequence[str]): Names of one or more parent resources
                from which to retrieve log entries:
                ::
                    "projects/[PROJECT_ID]"
                    "organizations/[ORGANIZATION_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]"
                    "folders/[FOLDER_ID]"
            filter_ (str): a filter expression. See
                https://cloud.google.com/logging/docs/view/advanced_filters
            order_by (str): One of :data:`~logging_v2.ASCENDING`
                or :data:`~logging_v2.DESCENDING`.
            max_results (Optional[int]):
                Optional. The maximum number of entries to return.
                Non-positive values are treated as 0. If None, uses API defaults.
            page_size (int): number of entries to fetch in each API call. Although
                requests are paged internally, logs are returned by the generator
                one at a time. If not passed, defaults to a value set by the API.
            page_token (str): opaque marker for the starting "page" of entries. If not
                passed, the API will return the first page of entries.
        Returns:
            Generator[~logging_v2.LogEntry]
        """
        extra_params = {"resourceNames": resource_names}
        if filter_ is not None:
            extra_params["filter"] = filter_
        if order_by is not None:
            extra_params["orderBy"] = order_by
        if page_size is not None:
            extra_params["pageSize"] = page_size
        path = "/entries:list"
        # We attach a mutable loggers dictionary so that as Logger
        # objects are created by entry_from_resource, they can be
        # re-used by other log entries from the same logger.
        loggers = {}
        item_to_value = functools.partial(_item_to_entry, loggers=loggers)
        iterator = page_iterator.HTTPIterator(
            client=self._client,
            api_request=self._client._connection.api_request,
            path=path,
            item_to_value=item_to_value,
            items_key="entries",
            page_token=page_token,
            extra_params=extra_params,
        )
        # This method uses POST to make a read-only request.
        iterator._HTTP_METHOD = "POST"
        return _entries_pager(iterator, max_results)
    def write_entries(
        self,
        entries,
        *,
        logger_name=None,
        resource=None,
        labels=None,
        partial_success=False,
        dry_run=False,
    ):
        """Log an entry resource via a POST request
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
        Args:
            entries (Sequence[Mapping[str, ...]]): sequence of mappings representing
                the log entry resources to log.
            logger_name (Optional[str]): name of default logger to which to log the entries;
                individual entries may override.
            resource(Optional[Mapping[str, ...]]): default resource to associate with entries;
                individual entries may override.
            labels (Optional[Mapping[str, ...]]): default labels to associate with entries;
                individual entries may override.
            partial_success (Optional[bool]): Whether valid entries should be written even if
                some other entries fail due to INVALID_ARGUMENT or
                PERMISSION_DENIED errors. If any entry is not written, then
                the response status is the error associated with one of the
                failed entries and the response includes error details keyed
                by the entries' zero-based index in the ``entries.write``
                method.
            dry_run (Optional[bool]):
                If true, the request should expect normal response,
                but the entries won't be persisted nor exported.
                Useful for checking whether the logging API endpoints are working
                properly before sending valuable data.
        """
        data = {
            "entries": list(entries),
            "partialSuccess": partial_success,
            # NOTE(review): sibling keys are camelCase ("partialSuccess") but
            # this one is snake_case — confirm the v2 API accepts "dry_run".
            "dry_run": dry_run,
        }
        if logger_name is not None:
            data["logName"] = logger_name
        if resource is not None:
            data["resource"] = resource
        if labels is not None:
            data["labels"] = labels
        self.api_request(method="POST", path="/entries:write", data=data)
    def logger_delete(self, logger_name):
        """Delete all entries in a logger.
        Args:
            logger_name (str):  The resource name of the log to delete:
                ::
                    "projects/[PROJECT_ID]/logs/[LOG_ID]"
                    "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
                    "folders/[FOLDER_ID]/logs/[LOG_ID]"
                ``[LOG_ID]`` must be URL-encoded. For example,
                ``"projects/my-project-id/logs/syslog"``,
                ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
        """
        path = f"/{logger_name}"
        self.api_request(method="DELETE", path=path)
class _SinksAPI(object):
    """Helper mapping sink-related APIs.
    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
    """
    def __init__(self, client):
        self._client = client
        # Shortcut to the connection's request method, used by every helper.
        self.api_request = client._connection.api_request
    def list_sinks(self, parent, *, max_results=None, page_size=None, page_token=None):
        """List sinks for the parent resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list
        Args:
            parent (str): The parent resource whose sinks are to be listed:
                ::
                    "projects/[PROJECT_ID]"
                    "organizations/[ORGANIZATION_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]"
                    "folders/[FOLDER_ID]".
            max_results (Optional[int]):
                Optional. The maximum number of entries to return.
                Non-positive values are treated as 0. If None, uses API defaults.
            page_size (int): number of entries to fetch in each API call. Although
                requests are paged internally, logs are returned by the generator
                one at a time. If not passed, defaults to a value set by the API.
            page_token (str): opaque marker for the starting "page" of entries. If not
                passed, the API will return the first page of entries.
        Returns:
            Generator[~logging_v2.Sink]
        """
        extra_params = {}
        if page_size is not None:
            extra_params["pageSize"] = page_size
        path = f"/{parent}/sinks"
        iterator = page_iterator.HTTPIterator(
            client=self._client,
            api_request=self._client._connection.api_request,
            path=path,
            item_to_value=_item_to_sink,
            items_key="sinks",
            page_token=page_token,
            extra_params=extra_params,
        )
        return _entries_pager(iterator, max_results)
    def sink_create(
        self, parent, sink_name, filter_, destination, *, unique_writer_identity=False
    ):
        """Create a sink resource.
        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create
        Args:
            parent(str): The resource in which to create the sink:
                ::
                    "projects/[PROJECT_ID]"
                    "organizations/[ORGANIZATION_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]"
                    "folders/[FOLDER_ID]".
            sink_name (str): The name of the sink.
            filter_ (str): The advanced logs filter expression defining the
                entries exported by the sink.
            destination (str): Destination URI for the entries exported by
                the sink.
            unique_writer_identity (Optional[bool]):  determines the kind of
                IAM identity returned as writer_identity in the new sink.
        Returns:
            dict: The sink resource returned from the API.
        """
        target = f"/{parent}/sinks"
        data = {"name": sink_name, "filter": filter_, "destination": destination}
        query_params = {"uniqueWriterIdentity": unique_writer_identity}
        return self.api_request(
            method="POST", path=target, data=data, query_params=query_params
        )
    def sink_get(self, sink_name):
        """Retrieve a sink resource.
        Args:
            sink_name (str): The resource name of the sink:
                ::
                    "projects/[PROJECT_ID]/sinks/[SINK_ID]"
                    "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
                    "folders/[FOLDER_ID]/sinks/[SINK_ID]"
        Returns:
            dict: The JSON sink object returned from the API.
        """
        target = f"/{sink_name}"
        return self.api_request(method="GET", path=target)
    def sink_update(
        self, sink_name, filter_, destination, *, unique_writer_identity=False
    ):
        """Update a sink resource.
        Args:
            sink_name (str): Required. The resource name of the sink:
                ::
                    "projects/[PROJECT_ID]/sinks/[SINK_ID]"
                    "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
                    "folders/[FOLDER_ID]/sinks/[SINK_ID]"
            filter_ (str): The advanced logs filter expression defining the
                entries exported by the sink.
            destination (str): destination URI for the entries exported by
                the sink.
            unique_writer_identity (Optional[bool]): determines the kind of
                IAM identity returned as writer_identity in the new sink.
        Returns:
            dict: The returned (updated) resource.
        """
        target = f"/{sink_name}"
        name = sink_name.split("/")[-1]  # parse name out of full resource name
        data = {"name": name, "filter": filter_, "destination": destination}
        query_params = {"uniqueWriterIdentity": unique_writer_identity}
        return self.api_request(
            method="PUT", path=target, query_params=query_params, data=data
        )
    def sink_delete(self, sink_name):
        """Delete a sink resource.
        Args:
            sink_name (str): Required. The full resource name of the sink to delete,
                including the parent resource and the sink identifier:
                ::
                    "projects/[PROJECT_ID]/sinks/[SINK_ID]"
                    "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
                    "folders/[FOLDER_ID]/sinks/[SINK_ID]"
                Example: ``"projects/my-project-id/sinks/my-sink-id"``.
        """
        target = f"/{sink_name}"
        self.api_request(method="DELETE", path=target)
class _MetricsAPI(object):
"""Helper mapping sink-related APIs."""
    def __init__(self, client):
        """Store the owning client and a shortcut to its JSON request method."""
        self._client = client
        self.api_request = client._connection.api_request
def list_metrics(
self, project, *, max_results=None, page_size=None, page_token=None
):
"""List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
Args:
max_results (Optional[int]):
Optional. The maximum number of entries to return.
Non-positive values are treated as 0. If None, uses API defaults.
page_size (int): number of entries to fetch in each API call. Although
requests are paged internally, logs are returned by the generator
one at a time. If not passed, defaults to a value set by the API.
page_token (str): opaque marker for the starting "page" of entries. If not
passed, the API will return the first page of | |
for e in edges:
del self.id_to_edge[e.getId()]
v = vertexId(v)
del self.id_to_vertex[v]
print("deleteVertex#" + str(self.id).rstrip() + "#" + str(v))
sys.stdout.flush()
def addEdge(self, sourceVertex, targetVertex, edgeId = -1):
# return: Edge object with id only
sourceVertex = vertexId(sourceVertex)
targetVertex = vertexId(targetVertex)
idSubString = ""
if not edgeId == -1:
idSubString = "#"+str(edgeId)
line = "addEdge#"+str(self.id).rstrip() + "#" + str(sourceVertex).rstrip() + \
"#" + str(targetVertex).rstrip() + idSubString.rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline()
if eid != "\n": # it's possible that the edge cannot be added (e.g., a new selfloop)
e = Edge(self, eid)
self.id_to_edge[e.getId()] = e
return e
return None
def existsEdge(self, edge):
line = "existsEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
thereExistsAnEdge = sys.stdin.readline().rstrip()
return thereExistsAnEdge.lower() == "true"
def existsVertex(self, vertex):
line = "existsVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
thereExistsAVertex = sys.stdin.readline().rstrip()
return thereExistsAVertex.lower() == "true"
def deleteEdge(self, edge):
del self.id_to_edge[edge.getId()]
line = "deleteEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
def getAllEdgesBetween(self, vertexPair):
line = "getAllEdgesBetween#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(vertexPair)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
    # creates a random Erdős–Rényi graph with vertexCount vertices and edge probability p
def generateRandomGraph(self, vertexCount, p):
if not isinstance(vertexCount, int):
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number must be an int.")
if vertexCount < 0:
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number cannot be less than 0.")
if not isinstance(p, float) or p < 0 or p > 1.0:
gPrint("Cannot generate a random graph, wrong parameter: \
probability of an edge must be a float in [0,1].")
if vertexCount == 0:
return
vertices = []
coordinates = dict()
for id in range(vertexCount):
coordinates[id] = (10*math.cos(2*id*math.pi/vertexCount),
10*math.sin(2*id*math.pi/vertexCount))
nxgraph = nx.fast_gnp_random_graph(vertexCount, p)
d = dict()
id = 0
for nxV in nxgraph.nodes():
d[id] = nxV
id += 1
nxEdges = nxgraph.edges()
id = 0
for x in range(vertexCount):
vertices.append(self.addVertex(id, coordinates[id]))
id += 1
for x in vertices:
for y in vertices:
if x.getId() < y.getId():
if (d[x.getId()], d[y.getId()]) in nxEdges:
x.connect(y)
    # end manipulative functions
# setter functions
# begin: best for private use!
def setVertexFillColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
line = "setVertexFillColor#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1):
try:
line = line + rgbFormatter(colorRGB)
except:
self.sendErrorToGralog("the rgb color: " + str(colorRGB).rstrip() + " is not properly formatted!")
else:
self.sendErrorToGralog("neither Hex nor RGB color specified!")
print(line.rstrip())
sys.stdout.flush()
def setVertexStrokeColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
# print("colorhex: " + str(colorHex))
line = "setVertexStrokeColor#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexCoordinates(self, vertex, coordinates):
line = "setVertexCoordinates#" + str(self.id).rstrip()+"#" + str(vertexId(vertex)).rstrip()
x = -1
y = -1
x = coordinates[0]
y = coordinates[1]
if x == None:
x = "empty"
if y == None:
y = "empty"
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
def setEdgeContour(self, edge, contour):
line = line = "setEdgeContour#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + str(contour).rstrip()
print(line)
sys.stdout.flush()
def setEdgeColor(self, edge, colorHex=-1, colorRGB=-1):
line = "setEdgeColor#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#"
if not (colorHex == -1):
line = line + hexFormatter(colorHex)
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
    def setVertexRadius(self, vertex, newRadius):
        # Convenience wrapper for the "radius" dimension.
        self.setVertexDimension(vertex, newRadius, "radius")
    def setVertexHeight(self, vertex, newHeight):
        # Convenience wrapper for the "height" dimension.
        self.setVertexDimension(vertex, newHeight, "height")
    def setVertexWidth(self, vertex, newWidth):
        # Convenience wrapper for the "width" dimension.
        self.setVertexDimension(vertex, newWidth, "width")
def setVertexDimension(self, vertex, newDimension, dimension):
vertex = vertexId(vertex)
line = "setVertexDimension#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(newDimension).rstrip()+"#" + dimension.rstrip()
print(line.rstrip())
sys.stdout.flush()
def setVertexShape(self, vertex, shape):
vertex = vertexId(vertex)
line = "setVertexShape#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(shape).rstrip()
print(line.rstrip())
sys.stdout.flush()
    def setEdgeWeight(self, edge, weight):
        # Convenience wrapper for the "weight" edge property.
        self.setEdgeProperty(edge, "weight", weight)
    def setEdgeThickness(self, edge, thickness):
        # Convenience wrapper for the "thickness" edge property.
        self.setEdgeProperty(edge, "thickness", thickness)
def setEdgeProperty(self, edge, propertyName, value):
line = "setEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setVertexProperty(self, vertex, propertyName, value):
line = "setVertexProperty#"+str(self.id).rstrip() + "#"
line = line + str(vertexId(vertex)).rstrip()
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setEdgeLabel(self, edge, label):
line = "setEdgeLabel#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + label
print(line.rstrip())
sys.stdout.flush()
def setVertexLabel(self, vertex, label):
vertex = vertexId(vertex)
line = "setVertexLabel#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + label
print(line.rstrip())
sys.stdout.flush()
# end: best for private use!
def setGraph(self, graphFormat, graphString = "hello_world"):
graphFormat = graphFormat.lower()
line = "setGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()+"#"
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$$\n"
line += graphString
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$\n"
print(line)
sys.stdout.flush()
# TODO: implement this
# end setter functions
# getter functions
def toIgraph(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ig = ig.Graph.Read_GraphML("tmp.graphml")
os.remove("tmp.graphml")
return g_ig
def toNx(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_nx = nx.read_graphml("tmp.graphml")
os.remove("tmp.graphml")
return g_nx
def toElementTree(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ET = ET.parse("tmp.graphml")
os.remove("tmp.graphml")
return g_ET
    def toXml(self):
        # GraphML/XML export of the current graph, fetched from Gralog.
        return self.getGraph("xml")
    def getGraph(self, graphFormat):
        """Request the whole graph from Gralog in *graphFormat* and return it
        as a string.

        Supported formats (case-insensitive): "tgf", "gtgf", "xml".
        For (g)tgf the multi-line payload is framed by a "$$" line and a
        closing "$" line; each payload line is also replayed through the
        vertexify*/edgify* helpers so the local mirror stays in sync.
        """
        # warning!! importing as pure TGF will mean edge id's will
        # be lost. This will result in errors on the Gralog side.
        line = "getGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()
        print(line.rstrip())
        i = 0  # payload line counter (kept for parity; not otherwise used)
        sys.stdout.flush()
        line = sys.stdin.readline()
        graphString = ""
        if graphFormat.lower() == "tgf" or graphFormat.lower() == "gtgf":
            tgf = graphFormat.lower() == "tgf"
            multiline = False
            first = False
            if line[0] == line[1] == '$':
                multiline = True
                if tgf:
                    # TGF: the first payload line is a header, don't replay it.
                    first = True
                line = sys.stdin.readline()
            hashtagSeen = False
            # Without the "$$" frame there is no payload to read.
            if not multiline:
                return graphString
            while line[0] != '$':
                # gPrint("line: " + line +" and line[0]: " + line[0] + " and line[0]!='$': " + str(line[0] != '$'))
                graphString += line
                if line[0] == '#':
                    # '#' separates the vertex section from the edge section.
                    hashtagSeen = True
                else:
                    if not first:
                        if hashtagSeen:
                            if tgf:
                                self.edgifyTGFCommand(line)
                            else:
                                self.edgifyGTGFCommand(line)
                        else:
                            if tgf:
                                self.vertexifyTGFCommand(line)
                            else:
                                self.vertexifyGTGFCommand(line)
                line = sys.stdin.readline()
                i += 1
                first = False
            return graphString
        if graphFormat.lower() == "xml":
            # XML arrives as a single line.
            return line
def getAllVertices(self):
# return: list of Vertex objects with id
line = "getAllVertices#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vertexIdStringList = (sys.stdin.readline()).split("#")
vertexList = []
for vertexIdString in vertexIdStringList:
if representsInt(vertexIdString):
v = self.getVertexOrNew(vertexIdString)
vertexList.append(v)
return vertexList
    def getVertices(self):
        # Alias for getAllVertices().
        return(self.getAllVertices())
def getAllEdges(self):
# return: list of fully sourced Edge objects with fully sourced endpoint Vertices
line = "getAllEdges#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
if len(endpointList) == 1 and endpointList[-1] == "\n":
endpointList = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
    def getEdges(self):
        # Alias for getAllEdges().
        return(self.getAllEdges())
# start: best for private use!
def getNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
neighbourIdStringList = (sys.stdin.readline()).split("#")
neighboursList = []
for neighbourIdString in neighbourIdStringList:
if representsInt(neighbourIdString):
v = self.getVertexOrNew(neighbourIdString)
neighboursList.append(v)
return neighboursList
def getOutgoingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getOutgoingNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
outgoingNeighbourIdStringList = (sys.stdin.readline()).split("#")
outgoingNeighboursList = []
for outgoingNeighbourIdString in outgoingNeighbourIdStringList:
if representsInt(outgoingNeighbourIdString):
v = self.getVertexOrNew(outgoingNeighbourIdString)
outgoingNeighboursList.append(v)
return outgoingNeighboursList
def getIncomingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getIncomingNeighbours#"+str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
incomingNeighbourIdStringList = (sys.stdin.readline()).split("#")
incomingNeighboursList = []
for incomingNeighbourIdString in incomingNeighbourIdStringList:
if representsInt(incomingNeighbourIdString):
v = self.getVertexOrNew(incomingNeighbourIdString)
incomingNeighboursList.append(v)
return incomingNeighboursList
def getIncidentEdges(self, vertex):
# | |
# <gh_stars>0
import itertools as it
import os
from collections import defaultdict
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import mod.env.config as conf
import mod.env.network as nw
from mod.env.fleet.Car import Car
from mod.env.fleet.CarStatus import CarStatus
from mod.env.fleet.CarType import CarType
from mod.env.Point import Point
sns.set(style="ticks")
sns.set_context("paper")
np.set_printoptions(precision=3)
FOLDER_TESTING = "/adp/test"
FOLDER_TRAINING = "/adp/train"
FOLDER_MYOPIC = "/myopic"
FOLDER_POLICY_RANDOM = "/random"
ADP_LOGS = "adp_logs/"
FOLDER_TIME = "time/"
FOLDER_FLEET = "fleet/"
FOLDER_SERVICE = "service/"
PROGRESS_FILENAME = "progress.npy"
class EpisodeLog:
@property
def output_path(self):
return self.config.output_path
# if self.config.short_path:
# label = self.config.label_md5
# else:
# label = self.config.label
# if self.config.myopic:
# return conf.FOLDER_OUTPUT + label + FOLDER_MYOPIC
# if self.config.policy_random:
# return conf.FOLDER_OUTPUT + label + FOLDER_POLICY_RANDOM
# if self.save_progress and self.save_progress > 0:
# return conf.FOLDER_OUTPUT + label + FOLDER_TRAINING
# else:
# return conf.FOLDER_OUTPUT + label + FOLDER_TESTING
@property
def progress_path(self):
if self.config.short_path:
label = self.config.label_md5
else:
label = self.config.label
return f"{conf.FOLDER_OUTPUT}{label}/{PROGRESS_FILENAME}"
def create_folders(self):
# If config is not None, then the experiments should be saved
if self.config:
self.output_folder_delay = self.config.output_path + FOLDER_TIME
self.output_folder_fleet = self.config.output_path + FOLDER_FLEET
self.output_folder_service = (
self.config.output_path + FOLDER_SERVICE
)
self.output_folder_adp_logs = self.config.output_path + ADP_LOGS
self.folder_delay_data = self.output_folder_delay + "data/"
self.folder_fleet_status_data = self.output_folder_fleet + "data/"
self.folder_demand_status_data = (
self.output_folder_service + "data/"
)
# Creating folders to log MIP models
self.config.folder_mip = self.config.output_path + "mip/"
self.config.folder_mip_log = self.config.folder_mip + "log/"
self.config.folder_mip_lp = self.config.folder_mip + "lp/"
self.config.folder_adp_log = self.config.output_path + "logs/"
folders = [
self.output_folder_delay,
self.folder_delay_data,
self.output_folder_fleet,
self.folder_fleet_status_data,
self.output_folder_service,
self.folder_demand_status_data,
self.config.folder_mip_log,
self.config.folder_mip_lp,
self.config.folder_adp_log,
]
# Creating folders
for f in folders:
if not os.path.exists(f):
os.makedirs(f)
print(
f"### Saving episodes at:"
f"\n### - {self.config.output_path}"
f"\n### Saving plots at:"
f"\n### - {self.output_folder_fleet}"
f"\n### - {self.output_folder_service}"
)
def __init__(
self,
amod,
):
self.amod = amod
self.config = amod.config
self.adp = amod.adp
self.save_progress = amod.config.save_progress
self.create_folders()
    @property
    def n(self):
        """Number of episodes processed so far (delegates to the ADP state)."""
        return self.adp.n
    @property
    def reward(self):
        """Per-episode total-reward history (delegates to the ADP state)."""
        return self.adp.reward
    @property
    def service_rate(self):
        """Per-episode service-rate history (delegates to the ADP state)."""
        return self.adp.service_rate
    @property
    def weights(self):
        """Aggregation-level weight history (delegates to the ADP state)."""
        return self.adp.weights
def last_episode_stats(self):
try:
a = dict()
for k, v in self.adp.weights.items():
a[k] = v[-1]
stats_str = []
for sq, stats in self.adp.pk_delay[-1].items():
label_values = ", ".join(
[
f"{label}={v:2.2f}"
if isinstance(v, float)
else f"{label}={v:>5}"
for label, v in stats.items()
]
)
stats_str.append(f"{sq}[{label_values}]")
sq_delay_stats = ", ".join(stats_str)
delay_info = f"delays=({sq_delay_stats})"
stats_str_cars = []
for car_type, stats in self.adp.car_time[-1].items():
label_values = ", ".join(
[
f"{label}={v:>6.2f}"
if isinstance(v, float)
else f"{label}={v:>5}"
for label, v in stats.items()
]
)
stats_str_cars.append(f"{car_type}[{label_values}]")
car_type_stats = ", ".join(stats_str_cars)
car_time_info = f"car time status=({car_type_stats})"
return (
f"({self.adp.reward[-1]:15,.2f},"
f" {self.adp.service_rate[-1]:6.2%},"
f" {delay_info},"
f" {car_time_info}), "
f"Agg. level weights = {a}"
)
except:
return f"(0.00, 00.00%) Agg. level weights = []"
def compute_learning(self):
# Reward over the course of the whole experiment
self.plot_reward(
file_path=self.output_path + f"r_{self.adp.n:04}",
file_format="png",
dpi=150,
scale="linear",
)
# Service rate over the course of the whole experiment
self.plot_service_rate(
file_path=self.output_path + f"sl_{self.adp.n:04}",
file_format="png",
dpi=150,
)
# Service rate over the course of the whole experiment
self.plot_weights(
file_path=self.output_path + f"w_{self.adp.n:04}",
file_format="png",
dpi=150,
)
    def plot_trip_delays(
        self, rejected, serviced, file_path=None, file_format="png", dpi=150,
    ):
        """Overlaid histogram of pickup delays per service-quality class.

        serviced -- mapping sq_class -> list of pickup delays (min); one
            histogram per class, with serviced/rejected counts and the class
            service level in the legend label.
        rejected -- mapping sq_class -> rejected-trip entries (only counts
            are used here).
        Saves to "<file_path>.<file_format>" when file_path is given,
        otherwise shows the figure interactively; always closes the figure.

        NOTE(review): assumes every key of `serviced` also exists in
        `rejected` (true when both are defaultdicts, as in compute_episode);
        otherwise `rejected[sq]` raises KeyError -- confirm callers.
        """
        sns.set_context("talk", font_scale=1.4)
        total_trips = 0
        for sq, delays in serviced.items():
            n_serviced = len(delays)
            n_rejected = len(rejected[sq])
            total = n_rejected + n_serviced
            total_trips += total
            plt.hist(
                delays,
                label=f"{sq}(S={n_serviced:>5}, R={n_rejected:>5}) {n_serviced / total:6.2%}",
            )
        # Title shows the overall trip count across all classes
        plt.title(f"{total_trips}")
        plt.xlabel("Delay (min)")
        # Configure y axis
        plt.ylabel("#Trips")
        plt.legend(
            loc="center left",
            frameon=False,
            bbox_to_anchor=(1, 0, 0.5, 1),  # (0.5, -0.15),
            ncol=1,
        )
        if file_path:
            plt.savefig(
                f"{file_path}.{file_format}", bbox_inches="tight", dpi=dpi
            )
        else:
            plt.show()
        plt.close()
    def compute_episode(
        self,
        step_log,
        it_step_trip_list,
        processing_time,
        fleet_size=None,
        plots=True,
        save_df=True,
        save_learning=True,
        save_after_iteration=1,
        save_overall_stats=True,
    ):
        """Aggregate one finished episode into the ADP tracking structures.

        Collects per-class trip delay/distance/rejection stats and
        per-car-type time-in-status totals, appends them (plus reward and
        service rate) to the ADP history, and optionally saves plots, step
        dataframes, an overall-stats CSV row, and the learning-progress file.

        step_log -- episode logger (provides env, statuses, plotting, stats).
        it_step_trip_list -- iterable of per-step trip lists for the episode.
        processing_time -- episode wall-clock time, stored in the stats row.
        fleet_size -- optional extra stats column (used by MPC-optimal runs).
        plots/save_df/save_overall_stats -- toggles for the outputs.
        save_learning -- also acts as the save interval: progress is written
            when adp.n is a multiple of it. NOTE(review): with the default
            True, `n % True == 0` saves every episode -- confirm intent.
        save_after_iteration -- not used in this method; kept for interface
            compatibility.
        """
        # # Process trip data ######################################## #
        # Class pickup delays of SERVICED users
        trip_delays = defaultdict(list)
        # Class in-vehicle distances (km) of SERVICED users
        trip_distances = defaultdict(list)
        # Class in-vehicle distances (km) of REJECTED users
        trip_rejections = defaultdict(list)
        # Class trip count
        total_trips = defaultdict(int)
        # Origins of rejected trips (new car starting points)
        rejected_trip_origins = set()
        last_trip_origins = set()
        # Loop all trips from all steps
        for t in it.chain(*it_step_trip_list):
            total_trips[t.sq_class] += 1
            last_trip_origins.add(t.o.id)
            # If None -> Trip was rejected
            if t.pk_delay is not None:
                trip_delays[t.sq_class].append(t.pk_delay)
                trip_distances[t.sq_class].append(
                    nw.get_distance(t.o.id, t.d.id)
                )
            else:
                # Append travel distance of rejected trip
                trip_rejections[t.sq_class].append(
                    nw.get_distance(t.o.id, t.d.id)
                )
                rejected_trip_origins.add(t.o.id)
        delays_stats = dict()
        step_log.env.rejected_trip_origins = list(rejected_trip_origins)
        step_log.env.last_trip_origins = list(last_trip_origins)
        # TODO change to "for sq in user_bases"
        for sq, delays in trip_delays.items():
            # NOTE(review): the [0] fallbacks below make a class with zero
            # rejections report rejected=1 (len of the [0] default) and fold
            # a phantom 0 into the distance stats -- confirm this is the
            # intended behavior. `sl` cannot divide by zero here because sq
            # came from trip_delays, so total_trips[sq] >= 1.
            delays_stats[sq] = dict(
                delay_mean=np.mean(delays),
                delay_median=np.median(delays),
                delay_total=np.sum(delays),
                serviced=len(delays),
                serviced_dist_mean=np.mean(trip_distances.get(sq, [0])),
                serviced_dist_median=np.median(trip_distances.get(sq, [0])),
                serviced_dist_total=np.sum(trip_distances.get(sq, [0])),
                rejected=len(trip_rejections.get(sq, [0])),
                rejected_dist_mean=np.mean(trip_rejections.get(sq, [0])),
                rejected_dist_median=np.median(trip_rejections.get(sq, [0])),
                rejected_dist_total=np.sum(trip_rejections.get(sq, [0])),
                sl=len(delays) / total_trips.get(sq, 0),
            )
        # TODO comment this section
        car_type_status_durations = defaultdict(lambda: defaultdict(list))
        # How much time each car have spent in each status (in minutes)?
        # dict(dict(dict()))
        # CAR TYPE -> STATUS -> TOTAL DURATION
        for c in it.chain(step_log.env.cars, step_log.env.overall_hired):
            for status, duration in c.time_status.items():
                car_type_status_durations[c.type][status].append(
                    np.sum(duration)
                )
        # Remove status level (insert it as label, such as "STATUS_total")
        # dict(dict())
        # E.g.: {"CARTYPE1":{"STATUS1_total":total_duration}}
        car_type_status_dict = dict()
        for car_type, status_durations in car_type_status_durations.items():
            car_type_status_dict[car_type] = dict()
            overall_duration = 0
            for status, durations in status_durations.items():
                # Create status
                status_label = (
                    conf.status_label_dict[status].lower().replace(" ", "_")
                )
                total = np.sum(durations)
                overall_duration += total
                # mean = np.mean(durations)
                d = {
                    f"{status_label}_total": total,
                    # f"{status_label}_mean": mean
                }
                car_type_status_dict[car_type].update(d)
            car_type_status_dict[car_type].update(
                {"total_duration": overall_duration}
            )
        self.adp.pk_delay.append(delays_stats)
        self.adp.car_time.append(car_type_status_dict)
        # Increment number of episodes
        self.adp.n += 1
        # Update reward and service rate tracks
        self.adp.reward.append(step_log.total_reward)
        self.adp.service_rate.append(step_log.service_rate)
        if self.adp.weight_track is not None:
            for car_type in CarType:
                self.adp.weights[car_type.value].append(
                    self.adp.weight_track[car_type.value]
                )
        # Save intermediate plots
        if plots:
            # Fleet status (idle, recharging, rebalancing, servicing)
            # step_log.plot_fleet_status(
            #     step_log.car_statuses,
            #     file_path=self.output_folder_fleet + f"{self.adp.n:04}_total",
            #     file_format="png",
            #     dpi=150,
            # )
            if total_trips:
                self.plot_trip_delays(
                    trip_rejections,
                    trip_delays,
                    file_path=self.output_folder_delay + f"{self.adp.n:04}",
                    file_format="pdf",
                    dpi=150,
                )
            # Separate fleet-status plots for owned (PAV) and hired (FAV)
            # vehicles, each only when that fleet exists.
            if step_log.env.config.fleet_size > 0:
                step_log.plot_fleet_status(
                    step_log.pav_statuses,
                    file_path=self.output_folder_fleet
                    + f"{self.adp.n:04}_pav",
                    **step_log.env.config.fleet_plot_config,
                )
            # step_log.plot_fleet_status_all(
            #     step_log.car_statuses, step_log.pav_statuses, step_log.fav_statuses,
            #     file_path=self.output_folder_fleet + f"{self.adp.n:04}_fav",
            #     file_format="png",
            #     dpi=150,
            # )
            if step_log.env.config.fav_fleet_size > 0:
                step_log.plot_fleet_status(
                    step_log.fav_statuses,
                    file_path=self.output_folder_fleet
                    + f"{self.adp.n:04}_fav",
                    **step_log.env.config.fleet_plot_config,
                )
            # Service status (battery level, demand, serviced demand)
            step_log.plot_service_status(
                file_path=self.output_folder_service + f"{self.adp.n:04}",
                **step_log.env.config.demand_plot_config,
            )
        if save_df:
            # Per-step fleet and demand status tables for this episode
            df_fleet = step_log.get_step_status_count()
            df_fleet.to_csv(
                self.folder_fleet_status_data
                + f"e_fleet_status_{self.adp.n:04}.csv"
            )
            df_demand = step_log.get_step_demand_status()
            df_demand.to_csv(
                self.folder_demand_status_data
                + f"e_demand_status_{self.adp.n:04}.csv"
            )
        if save_overall_stats:
            cols, df_stats = step_log.get_step_stats()
            # Add user stats
            for sq, stats in sorted(
                self.adp.pk_delay[-1].items(), key=lambda sq_stats: sq_stats[0]
            ):
                for label, v in sorted(
                    stats.items(), key=lambda label_v: label_v[0]
                ):
                    col = f"{sq}_{label}"
                    cols.append(col)
                    df_stats[col] = v
            # Add car stats
            for car_type, stats in sorted(
                self.adp.car_time[-1].items(),
                key=lambda car_type_stats: car_type_stats[0],
            ):
                for label, v in sorted(
                    stats.items(), key=lambda label_v: label_v[0]
                ):
                    col = f"{car_type}_{label}"
                    cols.append(col)
                    df_stats[col] = v
            cols.append("time")
            df_stats["time"] = pd.Series([processing_time])
            # MPC optimal defines fleet sizes for each iteration based
            # on trip data
            if fleet_size is not None:
                cols.append("fleet_size")
                df_stats["fleet_size"] = pd.Series([fleet_size])
            # Append one row per episode; write the header only once.
            stats_file = self.output_path + "overall_stats.csv"
            df_stats.to_csv(
                stats_file,
                mode="a",
                index=False,
                columns=cols,
                header=not os.path.exists(stats_file),
            )
        # Save what was learned so far
        if save_learning and self.adp and self.adp.n % save_learning == 0:
            # t1 = time.time()
            # adp_data = self.adp.current_data
            # np.save("dic.npy", adp_data)
            # print(time.time() - t1)
            # t2 = time.time()
            # adp_data_np = self.adp.current_data_np
            # np.save("tuple.npy", adp_data_np)
            # print(time.time() - t2)
            # t3 = time.time()
            # adp_data_np = self.adp.data
            # np.save("tuple_np.npy", dict(adp_data_np))
            # print(time.time() - t3)
            # t3 = time.time()
            # adp_data_np = self.adp.current_data_np2
            # np.save("tuple2.npy", adp_data_np)
            # print(time.time() - t3)
            # For each:
            # - Time step t,
            # - Aggregation level g,
            # - Attribute a
            # - Save (value, count) tuple
            # print("BEFORE TRANSFORMATION")
            # pprint(self.adp.values)
            # print("CURRENT DATA")
            # pprint(self.adp.current_data)
            # NOTE(review): np.save appends ".npy" unless progress_path
            # already ends with it -- confirm the read side matches.
            np.save(
                self.progress_path,
                {
                    "episodes": self.adp.n,
                    "reward": self.adp.reward,
                    "service_rate": self.adp.service_rate,
                    "pk_delay": self.adp.pk_delay,
                    "car_time": self.adp.car_time,
                    "progress": self.adp.current_data,
                    "weights": self.adp.weights,
                },
            )
    def load_progress(self):
        """Load episodes learned so far.

        Restores the episode count, reward history, service-rate history and
        aggregation-level weights from the saved progress file into the ADP
        state. Returns nothing; mutates ``self.adp`` in place.

        NOTE(review): ``read_progress`` must return exactly these four values,
        while the save side (compute_episode) stores more keys -- confirm the
        two stay in sync.
        """
        (
            self.adp.n,
            self.adp.reward,
            self.adp.service_rate,
            self.adp.weights,
        ) = self.adp.read_progress(self.progress_path)
        # print("After reading")
        # pprint(self.adp.values)
def plot_weights(
self, file_path=None, file_format="png", dpi=150, scale="linear"
):
sns.set_context("paper")
def plot_series(weights, car_type="AV"):
series_list = [list(a) for a in zip(*weights)]
for series in series_list:
plt.plot(np.arange(self.adp.n), series)
plt.xlabel("Episodes")
plt.xscale(scale)
plt.ylabel("Weights")
plt.legend([f"Level {g}" for g in range(len(series_list))])
# Ticks
# plt.yticks(np.arange(1, step=0.05))
# plt.xticks(np.arange(self.adp.n))
if file_path:
plt.savefig(
f"{file_path}_{car_type}.{file_format}",
bbox_inches="tight",
dpi=dpi,
)
else:
plt.show()
# print("# Weights")
# pprint(self.adp.weights)
for car_type, weights in | |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#
# Univention Management Console module:
# Manage licenses
#
# Copyright 2021 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
from typing import Callable, Dict, List, Optional, Union
from ldap.dn import is_dn
from ldap.filter import escape_filter_chars
from ucsschool.lib.models.group import SchoolClass, WorkGroup
from ucsschool.lib.models.user import User
from ucsschool.lib.school_umc_base import SchoolBaseModule, SchoolSanitizer
from ucsschool.lib.school_umc_ldap_connection import USER_WRITE, LDAP_Connection
from univention.admin.syntax import iso8601Date
from univention.bildungslogin.handlers import (
AssignmentHandler,
BiloCreateError,
LicenseHandler,
MetaDataHandler, ObjectType
)
from univention.bildungslogin.models import License, LicenseType, Role
from univention.bildungslogin.utils import ldap_escape
from univention.bildungslogin.license_import import import_license, load_license_file
from univention.lib.i18n import Translation
from univention.management.console.config import ucr
from univention.management.console.error import UMC_Error
from univention.management.console.log import MODULE
from univention.management.console.modules.decorators import sanitize
from univention.management.console.modules.sanitizers import (
BooleanSanitizer,
LDAPSearchSanitizer,
ListSanitizer,
StringSanitizer,
)
from univention.udm import UDM
from univention.udm.exceptions import SearchLimitReached
# Module-level gettext shortcut for this UMC module's translation domain.
_ = Translation("ucs-school-umc-licenses").translate
def undefined_if_none(value, zero_as_none=False):  # type: (Optional[int], bool) -> Union[unicode, int]
    """Map None (and, optionally, 0) to the localized string "undefined".

    If zero_as_none is True, a value of 0 is treated like None as well.
    Any other value is passed through unchanged.
    """
    treat_as_undefined = value is None or (zero_as_none and value == 0)
    if treat_as_undefined:
        return _("undefined")
    return value
def optional_date2str(date):
    """Return the ISO 8601 string for *date*, or "" when *date* is falsy."""
    return iso8601Date.from_datetime(date) if date else ""
class Instance(SchoolBaseModule):
@sanitize(
isAdvancedSearch=BooleanSanitizer(required=True),
school=SchoolSanitizer(required=True),
onlyAvailableLicenses=BooleanSanitizer(required=True),
timeFrom=StringSanitizer(regex_pattern=iso8601Date.regex, allow_none=True, default=None),
timeTo=StringSanitizer(regex_pattern=iso8601Date.regex, allow_none=True, default=None),
publisher=LDAPSearchSanitizer(add_asterisks=False, default=""),
licenseType=ListSanitizer(sanitizer=LDAPSearchSanitizer(add_asterisks=False)),
userPattern=LDAPSearchSanitizer(default=""),
productId=LDAPSearchSanitizer(default=""),
product=LDAPSearchSanitizer(default=""),
licenseCode=LDAPSearchSanitizer(default=""),
pattern=LDAPSearchSanitizer(default=""),
allocationProductId=LDAPSearchSanitizer(add_asterisks=False, default=""),
)
@LDAP_Connection(USER_WRITE)
def licenses_query(self, request, ldap_user_write=None):
"""Searches for licenses
requests.options = {
isAdvancedSearch -- boolean
school -- str (schoolId)
timeFrom -- str (ISO 8601 date string)
timeTo -- str (ISO 8601 date string)
onlyAllocatableLicenses -- boolean
publisher -- str
licenseType -- list
userPattern -- str
productId -- str
product -- str
licenseCode -- str
pattern -- str
}
"""
MODULE.error("licenses.licenses_query: options: %s" % str(request.options))
sizelimit = int(ucr.get("directory/manager/web/sizelimit", 2000))
lh = LicenseHandler(ldap_user_write)
time_from = request.options.get("timeFrom")
time_from = iso8601Date.to_datetime(time_from) if time_from else None
time_to = request.options.get("timeTo")
time_to = iso8601Date.to_datetime(time_to) if time_to else None
try:
result = lh.search_for_licenses(
is_advanced_search=request.options.get("isAdvancedSearch"),
school=request.options.get("school"),
time_from=time_from,
time_to=time_to,
only_available_licenses=request.options.get("onlyAvailableLicenses"),
publisher=request.options.get("publisher"),
license_types=request.options.get("licenseType"),
user_pattern=request.options.get("userPattern"),
product_id=request.options.get("productId"),
product=request.options.get("product"),
license_code=request.options.get("licenseCode"),
pattern=request.options.get("pattern"),
restrict_to_this_product_id=request.options.get("allocationProductId"),
sizelimit=sizelimit,
)
except SearchLimitReached:
raise UMC_Error(
_(
"The query you have entered yields too many matching entries. "
"Please narrow down your search by specifying more query parameters. "
"The current size limit of {} can be configured with the UCR variable "
"directory/manager/web/sizelimit."
).format(sizelimit)
)
for res in result:
res["importDate"] = iso8601Date.from_datetime(res["importDate"])
res["validityStart"] = iso8601Date.from_datetime(res["validityStart"]) if res.get("validityStart") else None
res["validityEnd"] = iso8601Date.from_datetime(res["validityEnd"]) if res.get("validityEnd") else None
res["countAquired"] = undefined_if_none(res["countAquired"], zero_as_none=True)
res["countAvailable"] = undefined_if_none(res["countAvailable"])
res["countExpired"] = undefined_if_none(res["countExpired"])
MODULE.info("licenses.licenses_query: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(
# school=StringSanitizer(required=True),
licenseCode=StringSanitizer(required=True),
)
@LDAP_Connection(USER_WRITE)
def licenses_get(self, request, ldap_user_write=None):
"""Get single license + meta data + assigned users
requests.options = {
# school -- schoolId
licenseCode -- str
}
"""
MODULE.info("licenses.get: options: %s" % str(request.options))
# TODO should the school be incorperated in getting the license?
# school = request.options.get("school")
license_code = request.options.get("licenseCode")
lh = LicenseHandler(ldap_user_write)
license = lh.get_license_by_code(license_code)
assigned_users = lh.get_assigned_users(license)
for assigned_user in assigned_users:
assigned_user["dateOfAssignment"] = iso8601Date.from_datetime(
assigned_user["dateOfAssignment"]
)
meta_data = lh.get_meta_data_for_license(license)
result = {
"countAquired": undefined_if_none(license.license_quantity, zero_as_none=True),
"countAssigned": lh.get_number_of_assigned_users(license),
"countAvailable": undefined_if_none(lh.get_number_of_available_users(license)),
"countExpired": undefined_if_none(lh.get_number_of_expired_unassigned_users(license)),
"ignore": license.ignored_for_display,
"importDate": iso8601Date.from_datetime(license.delivery_date),
"licenseCode": license.license_code,
"licenseTypeLabel": LicenseType.label(license.license_type),
"productId": license.product_id,
"reference": license.purchasing_reference,
"specialLicense": license.license_special_type,
"usage": license.utilization_systems,
"validityStart": optional_date2str(license.validity_start_date),
"validityEnd": optional_date2str(license.validity_end_date),
"validitySpan": license.validity_duration,
"author": meta_data.author,
"cover": meta_data.cover or meta_data.cover_small,
"productName": meta_data.title,
"publisher": meta_data.publisher,
"users": assigned_users,
}
MODULE.info("licenses.get: result: %s" % str(result))
self.finished(request.id, result)
@LDAP_Connection(USER_WRITE)
def publishers(self, request, ldap_user_write=None):
MODULE.info("licenses.publishers: options: %s" % str(request.options))
mh = MetaDataHandler(ldap_user_write)
result = [{"id": md.publisher, "label": md.publisher} for md in mh.get_all()]
MODULE.info("licenses.publishers: result: %s" % str(result))
self.finished(request.id, result)
@LDAP_Connection(USER_WRITE)
def license_types(self, request, ldap_user_write=None):
MODULE.info("licenses.license_types: options: %s" % str(request.options))
result = LicenseHandler.get_license_types()
MODULE.info("licenses.license_types: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(
licenseCode=StringSanitizer(required=True),
ignore=BooleanSanitizer(required=True),
)
@LDAP_Connection(USER_WRITE)
def set_ignore(self, request, ldap_user_write=None):
"""Set 'ignored' attribute of a license
requests.options = {
licenseCode -- str
ignore -- boolean
}
"""
MODULE.info("licenses.set_ignore: options: %s" % str(request.options))
license_code = request.options.get("licenseCode")
ignore = request.options.get("ignore")
lh = LicenseHandler(ldap_user_write)
success = lh.set_license_ignore(license_code, ignore)
result = {
"errorMessage": ""
if success
else _(
"The 'Ignore' state cannot be changed because users are already assigned to the license."
)
}
MODULE.info("licenses.set_ignore: result: %s" % str(result))
self.finished(request.id, result)
@staticmethod
def _remove_from_objects(ldap_user_write, license_code, object_type, object_names):
""" Generic function for "remove_from_*" endpoints """
ah = AssignmentHandler(ldap_user_write)
failed_assignments = ah.remove_assignment_from_objects(license_code,
object_type,
object_names)
return {
"failedAssignments": [
{"username": fa[0], "error": fa[1]}
for fa in failed_assignments
]
}
@sanitize(licenseCode=StringSanitizer(required=True),
usernames=ListSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def remove_from_users(self, request, ldap_user_write=None):
"""
Remove ASSIGNED users from the license
requests.options = {
licenseCode -- str
usernames -- List[str]
}
"""
MODULE.info("licenses.remove_from_users: options: %s" % str(request.options))
result = self._remove_from_objects(ldap_user_write,
request.options.get("licenseCode"),
ObjectType.USER,
request.options.get("usernames"))
MODULE.info("licenses.remove_from_users: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(licenseCode=StringSanitizer(required=True),
group=StringSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def remove_from_group(self, request, ldap_user_write=None):
"""
Remove ASSIGNED group from the license
requests.options = {
licenseCode -- str
group -- str
}
"""
MODULE.info("licenses.remove_from_group: options: %s" % str(request.options))
result = self._remove_from_objects(ldap_user_write,
request.options.get("licenseCode"),
ObjectType.GROUP,
[request.options.get("group")])
MODULE.info("licenses.remove_from_group: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(licenseCode=StringSanitizer(required=True),
school=StringSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def remove_from_school(self, request, ldap_user_write=None):
"""
Remove ASSIGNED school from the license
requests.options = {
licenseCode -- str
school -- str
}
"""
MODULE.info("licenses.remove_from_school: options: %s" % str(request.options))
result = self._remove_from_objects(ldap_user_write,
request.options.get("licenseCode"),
ObjectType.SCHOOL,
[request.options.get("school")])
MODULE.info("licenses.remove_from_school: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(licenseCodes=ListSanitizer(required=True),
usernames=ListSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def assign_to_users(self, request, ldap_user_write=None):
"""Assign licenses to users
requests.options = {
licenseCodes -- List[str]
usernames -- List[str]
}
"""
MODULE.info("licenses.assign_to_users: options: %s" % str(request.options))
ah = AssignmentHandler(ldap_user_write)
result = ah.assign_objects_to_licenses(request.options.get("licenseCodes"),
ObjectType.USER,
request.options.get("usernames"))
MODULE.info("licenses.assign_to_users: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(licenseCodes=ListSanitizer(required=True),
school=StringSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def assign_to_school(self, request, ldap_user_write=None):
"""Assign licenses to a school
requests.options = {
licenseCodes -- List[str]
school -- str
}
"""
MODULE.info("licenses.assign_to_school: options: %s" % str(request.options))
ah = AssignmentHandler(ldap_user_write)
result = ah.assign_objects_to_licenses(request.options.get("licenseCodes"),
ObjectType.SCHOOL,
[request.options.get("school")])
MODULE.info("licenses.assign_to_school: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(licenseCodes=ListSanitizer(required=True),
schoolClass=StringSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def assign_to_class(self, request, ldap_user_write=None):
"""Assign licenses to a class
requests.options = {
licenseCodes -- List[str]
schoolClass -- str
}
"""
MODULE.info("licenses.assign_to_class: options: %s" % str(request.options))
ah = AssignmentHandler(ldap_user_write)
result = ah.assign_objects_to_licenses(request.options.get("licenseCodes"),
ObjectType.GROUP,
[request.options.get("schoolClass")])
MODULE.info("licenses.assign_to_class: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(licenseCodes=ListSanitizer(required=True),
workgroup=StringSanitizer(required=True))
@LDAP_Connection(USER_WRITE)
def assign_to_workgroup(self, request, ldap_user_write=None):
"""Assign licenses to a workgroup
requests.options = {
licenseCodes -- List[str]
workgroup -- str
}
"""
MODULE.info("licenses.assign_to_workgroup: options: %s" % str(request.options))
ah = AssignmentHandler(ldap_user_write)
result = ah.assign_objects_to_licenses(request.options.get("licenseCodes"),
ObjectType.GROUP,
[request.options.get("workgroup")])
MODULE.info("licenses.assign_to_workgroup: result: %s" % str(result))
self.finished(request.id, result)
@sanitize(
**{
"school": SchoolSanitizer(required=True),
"class": StringSanitizer(required=True),
"workgroup": StringSanitizer(required=True),
"pattern": LDAPSearchSanitizer(
required=True,
),
}
)
@LDAP_Connection(USER_WRITE)
def users_query(self, request, ldap_user_write=None):
"""Searches for users
requests.options = {
school
class
workgroup
pattern
}
"""
MODULE.info("licenses.query: options: %s" % str(request.options))
udm = UDM(ldap_user_write).version(1)
users_mod = udm.get("users/user")
school = request.options.get("school")
pattern = request.options.get("pattern")
workgroup = request.options.get("workgroup")
parts = [
"(school={})".format(escape_filter_chars(school)),
"(|(firstname={0})(lastname={0})(username={0}))".format(pattern),
]
if workgroup != "__all__":
parts.append("(memberOf={})".format(escape_filter_chars(workgroup)))
klass = request.options.get("class")
if klass != "__all__":
if is_dn(klass):
parts.append("(memberOf={})".format(escape_filter_chars(klass)))
else:
klass = LDAPSearchSanitizer().sanitize("p", {"p": klass})
filter_s = "(name={school}-{klass})".format(
school=escape_filter_chars(school), klass=ldap_escape(klass)
)
class_dns = [cls.dn for cls in SchoolClass.get_all(ldap_user_write, school, filter_s)]
if class_dns:
parts.append(
"(|{})".format(
"".join(["(memberOf={})".format(class_dn) for class_dn in class_dns])
)
)
else:
MODULE.info("licenses.query: result: %s" % str([]))
self.finished(request.id, [])
users_filter = "(&{})".format("".join(parts))
users = users_mod.search(users_filter)
workgroups = {wg.dn: wg.name for wg in WorkGroup.get_all(ldap_user_write, school)}
prefix = school + "-"
result = [
{
"firstname": user.props.firstname,
"lastname": user.props.lastname,
"username": user.props.username,
"role": Role.label(user.props.ucsschoolRole),
"class": ", ".join(
[
_cls[len(prefix):] if _cls.startswith(prefix) else _cls
for _cls in User.from_udm_obj(
user._orig_udm_object, school, ldap_user_write
).school_classes.get(school, [])
]
),
"workgroup": | |
np.allclose(
ivy.to_numpy(container_clipped.a),
np.array([[0.71749604, 1.9731141], [1.345305, 0.17937401]]),
)
def test_container_einsum(device, call):
    """einsum("ij->i") should row-sum every leaf; check key and attr access."""
    container = Container(
        {
            "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
            "b": {
                "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
                "d": ivy.array(
                    [[-2.0, -4.0], [-6.0, -8.0], [-10.0, -12.0]], device=device
                ),
            },
        }
    )
    summed = container.einsum("ij->i")
    expected_a = np.array([3.0, 7.0, 11.0])
    expected_c = np.array([6.0, 14.0, 22.0])
    expected_d = np.array([-6.0, -14.0, -22.0])
    for leaf, expected in (
        (summed["a"], expected_a),
        (summed.a, expected_a),
        (summed["b"]["c"], expected_c),
        (summed.b.c, expected_c),
        (summed["b"]["d"], expected_d),
        (summed.b.d, expected_d),
    ):
        assert np.allclose(ivy.to_numpy(leaf), expected)
# def test_container_vector_norm(device, call):
# dict_in = {
# "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
# "b": {
# "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
# "d": ivy.array([[3.0, 6.0], [9.0, 12.0], [15.0, 18.0]], device=device),
# },
# }
# container = Container(dict_in)
# container_normed = container.vector_norm(axis=(-1, -2))
# assert np.allclose(ivy.to_numpy(container_normed["a"]), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed.a), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed["b"]["c"]), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed.b.c), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed["b"]["d"]), 28.6182)
# assert np.allclose(ivy.to_numpy(container_normed.b.d), 28.6182)
def test_container_matrix_norm(device, call):
    """matrix_norm should reduce every leaf matrix to its matrix norm."""
    if call is helpers.mx_call:
        # MXNet does not support matrix norm
        pytest.skip()
    container = Container(
        {
            "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
            "b": {
                "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
                "d": ivy.array([[3.0, 6.0], [9.0, 12.0], [15.0, 18.0]], device=device),
            },
        }
    )
    normed = container.matrix_norm()
    for leaf, expected in (
        (normed["a"], 9.52551809),
        (normed.a, 9.52551809),
        (normed["b"]["c"], 19.05103618),
        (normed.b.c, 19.05103618),
        (normed["b"]["d"], 28.57655427),
        (normed.b.d, 28.57655427),
    ):
        assert np.allclose(ivy.to_numpy(leaf), expected)
def test_container_flip(device, call):
    """flip(-1) should reverse the last axis of every leaf."""
    container = Container(
        {
            "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
            "b": {
                "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
                "d": ivy.array(
                    [[-2.0, -4.0], [-6.0, -8.0], [-10.0, -12.0]], device=device
                ),
            },
        }
    )
    flipped = container.flip(-1)
    expected_a = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]])
    expected_c = np.array([[4.0, 2.0], [8.0, 6.0], [12.0, 10.0]])
    expected_d = np.array([[-4.0, -2.0], [-8.0, -6.0], [-12.0, -10.0]])
    for leaf, expected in (
        (flipped["a"], expected_a),
        (flipped.a, expected_a),
        (flipped["b"]["c"], expected_c),
        (flipped.b.c, expected_c),
        (flipped["b"]["d"], expected_d),
        (flipped.b.d, expected_d),
    ):
        assert np.allclose(ivy.to_numpy(leaf), expected)
def test_container_as_ones(device, call):
    """as_ones should map every leaf to ones of the same shape."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
        }
    )
    ones = container.as_ones()
    for leaf in (ones["a"], ones.a, ones["b"]["c"], ones.b.c, ones["b"]["d"], ones.b.d):
        assert np.allclose(ivy.to_numpy(leaf), np.array([1]))
def test_container_as_zeros(device, call):
    """as_zeros should map every leaf to zeros of the same shape."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
        }
    )
    zeros = container.as_zeros()
    for leaf in (
        zeros["a"],
        zeros.a,
        zeros["b"]["c"],
        zeros.b.c,
        zeros["b"]["d"],
        zeros.b.d,
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([0]))
def test_container_as_bools(device, call):
    """as_bools should collapse each leaf to a single Python bool by truthiness."""
    container = Container({"a": ivy.array([1], device=device), "b": {"c": [], "d": True}})
    container_bools = container.as_bools()
    # truthy leaves -> True, the empty list -> False; verify both access styles
    checks = (
        (container_bools["a"], True),
        (container_bools.a, True),
        (container_bools["b"]["c"], False),
        (container_bools.b.c, False),
        (container_bools["b"]["d"], True),
        (container_bools.b.d, True),
    )
    for result, expected in checks:
        assert result is expected
def test_container_all_true(device, call):
    """all_true is False when any leaf is falsy and True when all leaves are truthy."""
    mixed = Container({"a": ivy.array([1], device=device), "b": {"c": [], "d": True}})
    assert not mixed.all_true()
    truthy = Container({"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}})
    assert truthy.all_true()
    # assert_is_bool=True must reject non-boolean leaves; the inner assert is
    # intentionally kept inside the try so a falsy (non-raising) result also counts
    # noinspection PyBroadException
    error_raised = False
    try:
        assert truthy.all_true(assert_is_bool=True)
    except AssertionError:
        error_raised = True
    assert error_raised
def test_container_all_false(device, call):
    """all_false is True only when every leaf is falsy."""
    assert Container({"a": False, "b": {"c": [], "d": 0}}).all_false()
    assert not Container({"a": False, "b": {"c": [1], "d": 0}}).all_false()
    # assert_is_bool=True must reject non-boolean leaves; the inner assert is
    # intentionally kept inside the try so a falsy (non-raising) result also counts
    # noinspection PyBroadException
    error_raised = False
    try:
        assert Container(
            {"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
        ).all_false(assert_is_bool=True)
    except AssertionError:
        error_raised = True
    assert error_raised
def test_container_as_random_uniform(device, call):
    """as_random_uniform should replace every leaf with freshly sampled values."""
    container = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([2.0], device=device),
                "d": ivy.array([3.0], device=device),
            },
        }
    )
    randomized = container.as_random_uniform()
    # with probability 1 the sampled values differ from the seeds; check both
    # item-style and attribute-style access
    for leaf, seed_val in (
        (randomized["a"], 1.0),
        (randomized.a, 1.0),
        (randomized["b"]["c"], 2.0),
        (randomized.b.c, 2.0),
        (randomized["b"]["d"], 3.0),
        (randomized.b.d, 3.0),
    ):
        assert (ivy.to_numpy(leaf) != np.array([seed_val]))[0]
def test_container_clone(device, call):
    """dev_clone should copy the whole container onto every requested device."""
    container = Container(
        {
            "a": ivy.array([[1], [2], [3]], device=device),
            "b": {
                "c": ivy.array([[2], [3], [4]], device=device),
                "d": ivy.array([[3], [4], [5]], device=device),
            },
        }
    )
    # target devices: the current one, plus the last gpu when several exist
    target_devs = [device]
    if "gpu" in device and ivy.num_gpus() > 1:
        last_gpu = device[:-1] + str(ivy.num_gpus() - 1)
        target_devs.append(last_gpu)
    # without key_chains specification
    cloned = container.dev_clone(target_devs)
    assert isinstance(cloned, ivy.DevClonedItem)
    # every clone must live on the device it is keyed by
    assert min([sub.dev_str == dev for dev, sub in cloned.items()])
    # all clones must hold identical arrays
    assert ivy.Container.multi_map(
        lambda xs, _: ivy.arrays_equal(xs), [c for c in cloned.values()]
    ).all_true()
@pytest.mark.parametrize("devs_as_dict", [True, False])
def test_container_distribute(devs_as_dict, device, call):
    """dev_dist should split the batch dimension evenly across the target devices."""
    array_a = ivy.array([[1], [2], [3], [4]], device=device)
    array_bc = ivy.array([[2], [3], [4], [5]], device=device)
    array_bd = ivy.array([[3], [4], [5], [6]], device=device)
    container = Container({"a": array_a, "b": {"c": array_bc, "d": array_bd}})
    batch_size = array_a.shape[0]
    if call is helpers.mx_call:
        # MXNet does not support splitting along an axis with a remainder after division
        pytest.skip()
    # devices: the current one, plus the last gpu when several exist
    target_devs = [device]
    if "gpu" in device and ivy.num_gpus() > 1:
        target_devs.append(device[:-1] + str(ivy.num_gpus() - 1))
    if devs_as_dict:
        even_share = int((1 / len(target_devs)) * 4)
        target_devs = dict(zip(target_devs, [even_share] * len(target_devs)))
    sub_size = int(batch_size / len(target_devs))
    # without key_chains specification
    dist = container.dev_dist(target_devs)
    assert isinstance(dist, ivy.DevDistItem)
    # every shard must live on the device it is keyed by
    assert min([sub.dev_str == dev for dev, sub in dist.items()])
    # each shard must hold the matching contiguous slice of every source array
    for i, sub_cont in enumerate(dist.values()):
        lo = i * sub_size
        hi = lo + sub_size
        for sub_leaf, source in (
            (sub_cont.a, array_a),
            (sub_cont.b.c, array_bc),
            (sub_cont.b.d, array_bd),
        ):
            assert np.array_equal(ivy.to_numpy(sub_leaf), ivy.to_numpy(source)[lo:hi])
def test_container_unstack(device, call):
    """unstack along axis 0 should yield one container per row."""
    container = Container(
        {
            "a": ivy.array([[1], [2], [3]], device=device),
            "b": {
                "c": ivy.array([[2], [3], [4]], device=device),
                "d": ivy.array([[3], [4], [5]], device=device),
            },
        }
    )
    # without key_chains specification; rows of (a, b/c, b/d) expected per slice
    expected_rows = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    for cont, (a, bc, bd) in zip(container.unstack(0), expected_rows):
        for leaf_pair, value in (
            ((cont["a"], cont.a), a),
            ((cont["b"]["c"], cont.b.c), bc),
            ((cont["b"]["d"], cont.b.d), bd),
        ):
            for leaf in leaf_pair:
                assert np.array_equal(ivy.to_numpy(leaf), np.array([value]))
def test_container_split(device, call):
    """split with size 1 along the last axis should yield per-column containers."""
    container = Container(
        {
            "a": ivy.array([[1], [2], [3]], device=device),
            "b": {
                "c": ivy.array([[2], [3], [4]], device=device),
                "d": ivy.array([[3], [4], [5]], device=device),
            },
        }
    )
    # without key_chains specification; rows of (a, b/c, b/d) expected per split
    expected_rows = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    for cont, (a, bc, bd) in zip(container.split(1, -1), expected_rows):
        for leaf_pair, value in (
            ((cont["a"], cont.a), a),
            ((cont["b"]["c"], cont.b.c), bc),
            ((cont["b"]["d"], cont.b.d), bd),
        ):
            for leaf in leaf_pair:
                assert np.array_equal(ivy.to_numpy(leaf)[0], np.array([value]))
def test_container_gather(device, call):
    """Exercise Container.gather with and without key-chain restrictions."""
    container = Container(
        {
            "a": ivy.array([1, 2, 3, 4, 5, 6], device=device),
            "b": {
                "c": ivy.array([2, 3, 4, 5], device=device),
                "d": ivy.array([10, 9, 8, 7, 6], device=device),
            },
        }
    )
    inds = ivy.array([1, 3], device=device)

    def _check(cont, path, expected):
        # verify item-style and attribute-style access agree on the leaf value
        item_node = cont
        attr_node = cont
        for key in path:
            item_node = item_node[key]
            attr_node = getattr(attr_node, key)
        assert np.allclose(ivy.to_numpy(item_node), expected)
        assert np.allclose(ivy.to_numpy(attr_node), expected)

    gathered_a = np.array([2, 4])
    gathered_c = np.array([3, 5])
    gathered_d = np.array([9, 7])
    full_a = np.array([1, 2, 3, 4, 5, 6])
    full_d = np.array([10, 9, 8, 7, 6])

    # without key_chains specification
    res = container.gather(inds)
    _check(res, ("a",), gathered_a)
    _check(res, ("b", "c"), gathered_c)
    _check(res, ("b", "d"), gathered_d)

    # with key_chains to apply
    res = container.gather(inds, -1, ["a", "b/c"])
    _check(res, ("a",), gathered_a)
    _check(res, ("b", "c"), gathered_c)
    _check(res, ("b", "d"), full_d)

    # with key_chains to apply, unapplied leaves pruned
    res = container.gather(inds, -1, ["a", "b/c"], prune_unapplied=True)
    _check(res, ("a",), gathered_a)
    _check(res, ("b", "c"), gathered_c)
    assert "b/d" not in res

    # with key_chains to not apply
    res = container.gather(
        inds, -1, Container({"a": None, "b": {"d": None}}), to_apply=False
    )
    _check(res, ("a",), full_a)
    _check(res, ("b", "c"), gathered_c)
    _check(res, ("b", "d"), full_d)

    # with key_chains to not apply, unapplied leaves pruned
    res = container.gather(
        inds,
        -1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    assert "a" not in res
    _check(res, ("b", "c"), gathered_c)
    assert "b/d" not in res
def test_container_gather_nd(device, call):
dict_in = {
"a": ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], device=device),
"b": {
"c": ivy.array([[[8, 7], [6, 5]], [[4, 3], [2, 1]]], device=device),
"d": ivy.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]], device=device),
},
}
container = Container(dict_in)
# without key_chains specification
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], device=device))
assert np.allclose(
ivy.to_numpy(container_gathered["a"]), np.array([[3, | |
import sys
import yaml
import caffe
import numpy as np
from scipy.special import gamma
from scipy.special import gammaln
from scipy.special import polygamma
from scipy.stats import beta
# assign points to grid bins
def getPlaces(x, grid):
    """Assign each sample in ``x`` to a bin of the (sorted, ascending) ``grid``.

    Returns two dicts:
      * places_to_bins: index into ``x`` -> grid-bin index j (the sample lies
        between grid[j-1] and grid[j])
      * bins_to_places: grid-bin index -> list of indices into ``x``

    Samples below grid[0] are skipped entirely, and the scan stops (returning
    the partial mappings) once a sample exceeds the last grid node.
    """
    places_to_bins = dict()  # i of sorted x to j in grid
    bins_to_places = dict()
    for i in xrange(len(grid)):
        bins_to_places[i] = list()
    inx_sorted = np.argsort(x)
    ind = 1
    # find initial bucket :
    # (first grid index strictly greater than the smallest sample)
    for i in xrange(len(grid)):
        if x[inx_sorted[0]] > grid[i]:
            ind = i + 1
        else:
            break
    # skip samples that fall below the grid's left edge
    x_start = 0
    while x[inx_sorted[x_start]] < grid[0]:
        x_start += 1
    for i in xrange(x_start, len(x)):
        # advance to the first grid node that is >= the current sample
        while x[inx_sorted[i]] > grid[ind]:
            ind += 1
            if ind >= len(grid):
                # sample beyond the grid: stop and return what we have so far
                return places_to_bins, bins_to_places
        places_to_bins[inx_sorted[i]] = ind
        bins_to_places[ind].append(inx_sorted[i])
    return places_to_bins, bins_to_places
# estimate the histogram using the assigments of points to grid bins
def getDistributionDensity(x, bins_to_places, grid, grid_delta):
    """Estimate a histogram density over ``grid`` from binned samples.

    Each grid node accumulates, from the samples assigned to the bin on its
    right, their distance to the node's left neighbour, and from the samples
    in the next bin, their distance to the node's right neighbour; the result
    is normalized by ``len(x) * grid_delta``.

    :param x: the raw sample values (indexable by the indices stored in
        ``bins_to_places``).
    :param bins_to_places: grid-bin index -> list of indices into ``x``,
        as produced by getPlaces.
    :param grid: 1-D array of grid nodes.
    :param grid_delta: spacing between grid nodes.
    :return: 1-D float array of density values, one per grid node.
    """
    # use an explicit float accumulator: np.zeros_like(grid) would inherit an
    # integer dtype from an integer grid and silently truncate the sums below
    p = np.zeros(len(grid), dtype=np.float64)
    for i in range(len(grid)):
        left_add = 0
        if i > 0:
            # samples in bin i contribute their offset from the left node
            d_i_list_left = np.array(bins_to_places[i])
            left_dist = np.array([x[ii] for ii in d_i_list_left])
            left_add = sum(left_dist - grid[i - 1])
        right_add = 0
        if i < len(grid) - 1:
            # samples in bin i+1 contribute their offset to the right node
            d_i_list_right = np.array(bins_to_places[i + 1])
            right_dist = np.array([x[ii] for ii in d_i_list_right])
            right_add = sum(grid[i + 1] - right_dist)
        p[i] = (left_add + right_add)
    p /= len(x) * grid_delta
    return p
# def calculateNPGradOverBins(d_pos, distr_pos, d_neg, distr_neg, grid_delta):
# dldp = np.cumsum(distr_neg[::-1])[::-1]
# dldn = np.cumsum(distr_pos)
#
# grad_pos = dldp[:]
# grad_pos[1:] = (grad_pos[1:] - grad_pos[:-1])
# grad_pos /= grid_delta*len(d_pos)
#
# grad_neg = dldn[:]
# grad_neg[1:] = (grad_neg[1:] - grad_neg[:-1])
# grad_neg/= grid_delta*len(d_neg)
# return grad_pos, grad_neg
def calculateLossGradOverDistribution(distr_pos, distr_neg, L):
    """Gradient of the bilinear loss ``distr_pos . L . distr_neg``.

    :return: (gradient w.r.t. distr_pos, gradient w.r.t. distr_neg).
    """
    return np.dot(L, distr_neg), np.dot(distr_pos, L)
def calculateLossGradOverBinsForHist(d_pos, d_neg, grid_delta, grad_pos, grad_neg):
    """Turn cumulative-bin gradients into per-bin gradients, normalized by bin mass.

    NOTE: modifies ``grad_pos`` and ``grad_neg`` in place and returns them.
    """
    for grad, data in ((grad_pos, d_pos), (grad_neg, d_neg)):
        # first-order difference; the first entry keeps its original value
        grad[1:] = np.diff(grad)
        grad /= grid_delta * len(data)
    return grad_pos, grad_neg
def getGradOverData(data, grad_over_bins, places_to_bins):
    """Map per-bin gradients back onto the individual data points.

    :param data: the data points (only its length is used).
    :param grad_over_bins: gradient value per grid bin.
    :param places_to_bins: data index -> grid-bin index (from getPlaces).
    :return: 1-D array with one gradient value per data point.
    :raises KeyError: if a data index is missing from ``places_to_bins``
        (i.e. the sample fell outside the grid), matching the old behavior.
    """
    # comprehension + range replaces the manual append loop (and py2-only xrange)
    return np.array([grad_over_bins[places_to_bins[i]] for i in range(len(data))])
##################### Beta-distribution fitting and gradient ##########################################################
# estimate beta-distribution
def getBetaDistributionDensity(x, grid, grid_delta):
    """Fit a beta distribution to ``x`` by moment matching and evaluate it on ``grid``.

    Samples outside [-1, 1] are dropped; the rest are mapped to the beta
    support [0, 1] and the alpha/beta parameters are estimated from the sample
    mean and (ddof=1) variance.

    :return: density values on ``grid``, scaled by ``grid_delta / 2``.
    """
    grid = np.array(np.copy(grid))
    # keep only samples inside the supported range [-1, 1]
    x = np.array([v for v in x if v >= -1 and v <= 1])
    x_scaled = (x + 1.) / 2.  # map [-1, 1] -> [0, 1], the beta support
    mean = np.mean(x_scaled)
    var = np.var(x_scaled, ddof=1)
    # method-of-moments estimates of the beta parameters
    alpha1 = mean ** 2 * (1 - mean) / var - mean
    beta1 = alpha1 * (1 - mean) / mean
    # (removed an unused pdf lambda built from gamma() ratios, which was both
    # dead code and overflow-prone for large parameters; beta.pdf is used instead)
    grid_scaled = np.array((grid + 1) / 2)
    ### clamp endpoints to avoid zero-division errors at the support boundary
    grid_scaled[0] = 1e-5
    grid_scaled[len(grid_scaled) - 1] = 0.999
    distr_ = beta.pdf(grid_scaled, alpha1, beta1) * grid_delta / (2.)
    return distr_
def gamma_derivative(x):
    """First derivative of the gamma function: Gamma'(x) = digamma(x) * Gamma(x)."""
    digamma_x = polygamma(0, x)
    return gamma(x) * digamma_x
def dvardx(x):
    """Gradient of the ddof=1 sample variance of ``x`` w.r.t. each element.

    :return: row vector of shape (1, len(x)).
    """
    n = len(x)
    centered = x - np.mean(x)
    through_mean = centered * (-2.0) / ((n - 1) * n)
    # NOTE(review): sum(through_mean) is analytically zero (centered values sum
    # to 0); the term is kept to mirror the original derivation exactly.
    row_term = np.ones((1, n)) * np.sum(through_mean) * 2.0 / ((n - 1) * n)
    direct_term = centered * 2.0 / (n - 1)
    return row_term + direct_term
def calculateLossGradOverDataForBeta(d_pos, d_neg, grid, grid_delta, grad_pos, grad_neg):
    """Chain-rule the loss gradient from the fitted beta densities back to the data.

    ``grad_pos``/``grad_neg`` are the loss gradients w.r.t. the discretized
    positive/negative densities (one value per grid node).  The densities are
    beta distributions fitted by moment matching (see getBetaDistributionDensity),
    so the gradient flows through sample mean/variance -> alpha/beta -> density
    and down to each individual data point.

    NOTE: clips ``d_pos`` and ``d_neg`` into [-1, 1] IN PLACE.
    :return: (gradient over d_pos, gradient over d_neg) as 1-D arrays.
    """
    grid = np.array(np.copy(grid))
    # scale grid
    grid = np.array((grid + 1.) / 2.)
    ### to avoid zero devision errors
    grid[0] = 1e-5
    grid[len(grid) - 1] = 0.999
    # clip data into [-1, 1] (in place), scale to [0, 1] and re-derive the
    # moment-matched beta parameters, mirroring getBetaDistributionDensity
    d_pos[d_pos >= 1] = 1
    d_pos[d_pos <= -1] = -1
    d_pos_scaled = (d_pos + 1.) / 2.
    mean_pos = np.mean(d_pos_scaled)
    var_pos = np.var(d_pos_scaled, ddof=1)
    alpha_pos = mean_pos ** 2 * (1 - mean_pos) / var_pos - mean_pos
    beta_pos = alpha_pos * (1 - mean_pos) / mean_pos
    d_neg[d_neg >= 1] = 1
    d_neg[d_neg <= -1] = -1
    d_neg_scaled = (d_neg + 1.) / 2.
    mean_neg = np.mean(d_neg_scaled)
    var_neg = np.var(d_neg_scaled, ddof=1)
    alpha_neg = mean_neg ** 2 * (1 - mean_neg) / var_neg - mean_neg
    beta_neg = alpha_neg * (1 - mean_neg) / mean_neg
    # dLd_distr - checked
    dldp = grad_pos
    dldn = grad_neg
    # dmeandx - checked (d mean / d x_i = 1/n for every i)
    dmean_posdd_pos = np.ones((1, len(d_pos))) * 1.0 / len(d_pos)
    dmean_negdd_neg = np.ones((1, len(d_neg))) * 1.0 / len(d_neg)
    # dvardx - checked (gradient of the ddof=1 sample variance)
    dvar_posdd_pos = dvardx(d_pos_scaled)
    dvar_negdd_neg = dvardx(d_neg_scaled)
    ######## d alpha/beta d mean/var
    # checked
    dalpha_dmean_pos = 1. / var_pos * (2 * mean_pos - 3 * mean_pos ** 2) - 1 + \
                       mean_pos ** 2 * (1 - mean_pos) / var_pos ** 2 / (len(d_pos) - 1) * (
                           2 * np.sum(d_pos_scaled - mean_pos))
    dalpha_dmean_neg = 1. / var_neg * (2 * mean_neg - 3 * mean_neg ** 2) - 1 + \
                       mean_neg ** 2 * (1 - mean_neg) / var_neg ** 2 / (len(d_neg) - 1) * (
                           2 * np.sum(d_neg_scaled - mean_neg))
    # checked
    dalpha_dvar_pos = -(mean_pos) ** 2 * (1 - mean_pos) * (var_pos) ** (-2)
    dalpha_dvar_neg = -(mean_neg) ** 2 * (1 - mean_neg) * (var_neg) ** (-2)
    # checked
    dbeta_dmean_pos = -alpha_pos / (mean_pos) ** 2 + (1 - mean_pos) / mean_pos * dalpha_dmean_pos
    dbeta_dmean_neg = -alpha_neg / (mean_neg) ** 2 + (1 - mean_neg) / mean_neg * dalpha_dmean_neg
    # checked
    dbeta_dvar_pos = (1 - mean_pos) / mean_pos * dalpha_dvar_pos
    dbeta_dvar_neg = (1 - mean_neg) / mean_neg * dalpha_dvar_neg
    ###### d aplha/beta d x - checheked
    dalpha_dd_pos = dalpha_dmean_pos * dmean_posdd_pos + dalpha_dvar_pos * dvar_posdd_pos
    dalpha_dd_neg = dalpha_dmean_neg * dmean_negdd_neg + dalpha_dvar_neg * dvar_negdd_neg
    dbeta_dd_pos = dbeta_dmean_pos * dmean_posdd_pos + dbeta_dvar_pos * dvar_posdd_pos
    dbeta_dd_neg = dbeta_dmean_neg * dmean_negdd_neg + dbeta_dvar_neg * dvar_negdd_neg
    ### d distr(p/n) d alpha/beta
    # normalization term Gamma(a+b)/(Gamma(a)Gamma(b)), via gammaln for stability
    gammaTerm_pos = np.exp(gammaln(alpha_pos + beta_pos) - gammaln(alpha_pos) - \
                           gammaln(beta_pos))
    gammaTerm_neg = np.exp(gammaln(alpha_neg + beta_neg) - gammaln(alpha_neg) - \
                           gammaln(beta_neg))
    # checked
    dGammaTerm_dalpha_pos = gammaTerm_pos * (polygamma(0, alpha_pos + beta_pos) - polygamma(0, alpha_pos))
    dGammaTerm_dalpha_neg = gammaTerm_neg * (polygamma(0, alpha_neg + beta_neg) - polygamma(0, alpha_neg))
    # checked
    dGammaTerm_dbeta_pos = gammaTerm_pos * (polygamma(0, alpha_pos + beta_pos) - polygamma(0, beta_pos))
    dGammaTerm_dbeta_neg = gammaTerm_neg * (polygamma(0, alpha_neg + beta_neg) - polygamma(0, beta_neg))
    # product rule on normalization * x^(a-1) * (1-x)^(b-1), evaluated on the grid
    dpdalpha_pos = (dGammaTerm_dalpha_pos * grid ** (alpha_pos - 1) * (1 - grid) ** (beta_pos - 1) +
                    gammaTerm_pos * grid ** (alpha_pos - 1) * np.log(grid) * (1 - grid) ** (
                        beta_pos - 1)) * grid_delta / 2.
    dndalpha_neg = (dGammaTerm_dalpha_neg * grid ** (alpha_neg - 1) * (1 - grid) ** (beta_neg - 1) +
                    gammaTerm_neg * grid ** (alpha_neg - 1) * np.log(grid) * (1 - grid) ** (
                        beta_neg - 1)) * grid_delta / 2.
    dpdbeta_pos = (dGammaTerm_dbeta_pos * grid ** (alpha_pos - 1) * (1 - grid) ** (beta_pos - 1) +
                   gammaTerm_pos * grid ** (alpha_pos - 1) * (1 - grid) ** (beta_pos - 1) * np.log(
                       1 - grid)) * grid_delta / 2.
    dndbeta_neg = (dGammaTerm_dbeta_neg * grid ** (alpha_neg - 1) * (1 - grid) ** (beta_neg - 1) +
                   gammaTerm_neg * grid ** (alpha_neg - 1) * (1 - grid) ** (beta_neg - 1) * np.log(
                       1 - grid)) * grid_delta / 2.
    # d distr d x
    # matrix : grid X number of points
    dpdd_pos = np.dot(dpdalpha_pos.T.reshape((len(grid), 1)), dalpha_dd_pos) + \
               np.dot(dpdbeta_pos.T.reshape((len(grid), 1)), dbeta_dd_pos)
    dndd_neg = np.dot(dndalpha_neg.T.reshape((len(grid), 1)), dalpha_dd_neg) + \
               np.dot(dndbeta_neg.T.reshape((len(grid), 1)), dbeta_dd_neg)
    ############# FINAL GRADIENT
    grad_pos = np.dot(dldp.reshape((1, len(grid))), dpdd_pos)
    grad_neg = np.dot(dldn.reshape((1, len(grid))), dndd_neg)
    # need scaling as beta distribution is fitted on scaled data
    return np.array(grad_pos / 2.).reshape(len(d_pos)), np.array(grad_neg / 2.).reshape(len(d_neg))
#######################################################################################################################
# Loss variants: 'simple' penalizes only the wrong ordering of a pair of
# similarities; 'linear' and 'exp' additionally weight by the margin-adjusted
# similarity difference (see DistributionLossLayer.getL).
LOSS_SIMPLE = 'simple'
LOSS_LINEAR = 'linear'
LOSS_EXP = 'exp'
# How the similarity distributions are estimated: raw histogram or fitted beta.
DISTR_TYPE_HIST = 'hist'
DISTR_TYPE_BETA = 'beta'
# Calculates probability of wrong order in pairs' similarities: positive pair less similar than negative one
# (this corresponds to 'simple' loss, other variants ('linear', 'exp') are generalizations that take into account
# not only the order but also the difference between the two similarity values).
# Can use histogram and beta-distribution to fit input data.
class DistributionLossLayer(caffe.Layer):
def getL(self):
L = np.ones((len(self.grid), len(self.grid)))
if self.loss == LOSS_SIMPLE:
for i in xrange(len(self.grid)):
L[i] = self.grid[i] <= self.grid
elif self.loss == LOSS_LINEAR:
for i in xrange(len(self.grid)):
L[i] = self.margin - self.grid[i] + self.grid
L[L < 0] = 0
elif self.loss == LOSS_EXP:
for i in xrange(len(self.grid)):
L[i] = np.log(np.exp(self.alpha * (self.margin + self.grid - self.grid[i])) + 1)
return L
def setup(self, bottom, top):
# | |
AssertionError:
raise RuntimeError("Invalid package specification: %r" % spec)
except AttributeError:
raise RuntimeError("Received dictionary as spec. Note that pip requirements are "
"not supported in conda-build meta.yaml.")
if ms.name == self.name():
raise RuntimeError("%s cannot depend on itself" % self.name())
for name, ver in name_ver_list:
if ms.name == name:
if self.noarch:
continue
for c in '=!@#$%^&*:;"\'\\|<>?/':
if c in ms.name:
sys.exit("Error: bad character '%s' in package name "
"dependency '%s'" % (c, ms.name))
parts = spec.split()
if len(parts) >= 2:
if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:
msg = ("Error: bad character '%s' in package version "
"dependency '%s'" % (parts[1], ms.name))
if len(parts) >= 3:
msg += "\nPerhaps you meant '%s %s%s'" % (ms.name,
parts[1], parts[2])
sys.exit(msg)
res.append(ms)
return res
def _get_hash_contents(self):
sections = ['source', 'requirements', 'build']
# make a copy of values, so that no sorting occurs in place
composite = HashableDict({section: copy.copy(self.get_section(section))
for section in sections})
outputs = self.get_section('outputs')
if outputs:
outs = []
for out in outputs:
out = copy.copy(out)
# files are dynamically determined, and there's no way to match them at render time.
# we need to exclude them from the hash.
if 'files' in out:
del out['files']
outs.append(out)
composite.update({'outputs': [HashableDict(out) for out in outs]})
# filter build requirements for ones that should not be in the hash
requirements = composite.get('requirements', {})
build_reqs = requirements.get('build', [])
excludes = self.config.variant.get('exclude_from_build_hash', [])
if excludes:
pattern = re.compile('|'.join('{}[\s$]?'.format(exc) for exc in excludes))
build_reqs = [req for req in build_reqs if not pattern.match(req)]
requirements['build'] = build_reqs
composite['requirements'] = requirements
# remove the build number from the hash, so that we can bump it without changing the hash
if 'number' in composite['build']:
del composite['build']['number']
# remove the build string, so that hashes don't affect themselves
if 'string' in composite['build']:
del composite['build']['string']
if not composite['build']:
del composite['build']
for key in 'build', 'run':
if key in composite['requirements'] and not composite['requirements'].get(key):
del composite['requirements'][key]
trim_empty_keys(composite)
file_paths = []
if self.path:
recorded_input_files = os.path.join(self.path, '..', 'hash_input_files')
if os.path.exists(recorded_input_files):
with open(recorded_input_files) as f:
file_paths = f.read().splitlines()
else:
files = utils.rec_glob(self.path, "*")
file_paths = sorted([f.replace(self.path + os.sep, '') for f in files])
# exclude meta.yaml and meta.yaml.template, because the json dictionary captures
# their content
file_paths = [f for f in file_paths if not f.startswith('meta.yaml')]
file_paths = sorted(filter_files(file_paths, self.path))
return composite, file_paths
def _hash_dependencies(self):
"""With arbitrary pinning, we can't depend on the build string as done in
build_string_from_metadata - there's just too much info. Instead, we keep that as-is, to
not be disruptive, but we add this extra hash, which is just a way of distinguishing files
on disk. The actual determination of dependencies is done in the repository metadata."""
# save only the first HASH_LENGTH characters - should be more than enough, since these only
# need to be unique within one version
# plus one is for the h - zero pad on the front, trim to match HASH_LENGTH
recipe_input, file_paths = self._get_hash_contents()
hash_ = hashlib.sha1(json.dumps(recipe_input, sort_keys=True).encode())
for recipe_file in file_paths:
with open(os.path.join(self.path, recipe_file), 'rb') as f:
hash_.update(f.read())
hash_ = 'h{0}'.format(hash_.hexdigest())[:self.config.hash_length + 1]
return hash_
    def build_id(self):
        """Return the build string, guaranteed to contain the dependency hash.

        An explicit build/string from the recipe is validated and used;
        otherwise one is derived from the metadata.  A hash token of the form
        ``h<hex>`` is then inserted or refreshed.
        """
        out = self.get_value('build/string')
        if out:
            check_bad_chrs(out, 'build/string')
        else:
            out = build_string_from_metadata(self)
        if not re.findall('h[0-9a-f]{%s}' % self.config.hash_length, out):
            # no hash present yet: splice one in around the trailing build number
            ret = out.rsplit('_', 1)
            try:
                int(ret[0])
                # purely numeric prefix (e.g. '0'): the hash goes in front
                out = self._hash_dependencies() + '_' + str(ret[0])
            except ValueError:
                # non-numeric prefix (e.g. 'py36'): append the hash to it,
                # then re-attach the original build-number suffix
                out = ret[0] + self._hash_dependencies()
                if len(ret) > 1:
                    out = '_'.join([out] + ret[1:])
        else:
            # a (possibly stale) hash is already embedded: replace it in place
            out = re.sub('h[0-9a-f]{%s}' % self.config.hash_length, self._hash_dependencies(), out)
        return out
@property
def noarch(self):
return self.get_value('build/noarch_python') or self.get_value('build/noarch')
def dist(self):
return '%s-%s-%s' % (self.name(), self.version(), self.build_id())
def pkg_fn(self):
return "%s.tar.bz2" % self.dist()
def is_app(self):
return bool(self.get_value('app/entry'))
def app_meta(self):
d = {'type': 'app'}
if self.get_value('app/icon'):
d['icon'] = '%s.png' % md5_file(join(
self.path, self.get_value('app/icon')))
for field, key in [('app/entry', 'app_entry'),
('app/type', 'app_type'),
('app/cli_opts', 'app_cli_opts'),
('app/summary', 'summary'),
('app/own_environment', 'app_own_environment')]:
value = self.get_value(field)
if value:
d[key] = value
return d
def info_index(self):
arch = self.config.host_arch or self.config.arch
d = dict(
name=self.name(),
version=self.version(),
build=self.build_id(),
build_number=self.build_number() if self.build_number() else 0,
platform=self.config.platform,
arch=ARCH_MAP.get(arch, arch),
subdir=self.config.host_subdir,
depends=sorted(' '.join(ms.spec.split())
for ms in self.ms_depends()),
)
for key in ('license', 'license_family'):
value = self.get_value('about/' + key)
if value:
d[key] = value
preferred_env = self.get_value('build/preferred_env')
if preferred_env:
d['preferred_env'] = preferred_env
if self.get_value('build/features'):
d['features'] = ' '.join(self.get_value('build/features'))
if self.get_value('build/track_features'):
d['track_features'] = ' '.join(self.get_value('build/track_features'))
if self.noarch:
d['platform'] = d['arch'] = None
d['subdir'] = 'noarch'
# These are new-style noarch settings. the self.noarch setting can be True in 2 ways:
# if noarch: True or if noarch_python: True. This is disambiguation.
build_noarch = self.get_value('build/noarch')
if build_noarch:
d['noarch'] = build_noarch
if self.is_app():
d.update(self.app_meta())
return d
def has_prefix_files(self):
ret = ensure_list(self.get_value('build/has_prefix_files', []))
if not isinstance(ret, list):
raise RuntimeError('build/has_prefix_files should be a list of paths')
if sys.platform == 'win32':
if any('\\' in i for i in ret):
raise RuntimeError("build/has_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.build_prefix)
def ignore_prefix_files(self):
ret = self.get_value('build/ignore_prefix_files', False)
if type(ret) not in (list, bool):
raise RuntimeError('build/ignore_prefix_files should be boolean or a list of paths '
'(optionally globs)')
if sys.platform == 'win32':
if type(ret) is list and any('\\' in i for i in ret):
raise RuntimeError("build/ignore_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.build_prefix) if type(ret) is list else ret
def always_include_files(self):
files = ensure_list(self.get_value('build/always_include_files', []))
if any('\\' in i for i in files):
raise RuntimeError("build/always_include_files paths must use / "
"as the path delimiter on Windows")
if on_win:
files = [f.replace("/", "\\") for f in files]
return expand_globs(files, self.config.build_prefix)
def binary_relocation(self):
ret = self.get_value('build/binary_relocation', True)
if type(ret) not in (list, bool):
raise RuntimeError('build/ignore_prefix_files should be boolean or a list of paths '
'(optionally globs)')
if sys.platform == 'win32':
if type(ret) is list and any('\\' in i for i in ret):
raise RuntimeError("build/ignore_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.build_prefix) if type(ret) is list else ret
def include_recipe(self):
return self.get_value('build/include_recipe', True)
def binary_has_prefix_files(self):
ret = ensure_list(self.get_value('build/binary_has_prefix_files', []))
if not isinstance(ret, list):
raise RuntimeError('build/binary_has_prefix_files should be a list of paths')
if sys.platform == 'win32':
if any('\\' in i for i in ret):
raise RuntimeError("build/binary_has_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.build_prefix)
def skip(self):
return self.get_value('build/skip', False)
def _get_contents(self, permit_undefined_jinja):
'''
Get the contents of our [meta.yaml|conda.yaml] file.
If jinja is installed, then the template.render function is called
before standard conda macro processors.
permit_undefined_jinja: If True, *any* use of undefined jinja variables will
evaluate to an emtpy string, without emitting an error.
'''
try:
import jinja2
except ImportError:
print("There was an error importing jinja2.", file=sys.stderr)
print("Please run `conda install jinja2` to enable jinja template support", file=sys.stderr) # noqa
with open(self.meta_path) as fd:
return fd.read()
from conda_build.jinja_context import context_processor, UndefinedNeverFail, FilteredLoader
path, filename = os.path.split(self.meta_path)
loaders = [ # search relative to '<conda_root>/Lib/site-packages/conda_build/templates'
jinja2.PackageLoader('conda_build'),
# search relative to RECIPE_DIR
jinja2.FileSystemLoader(path)
]
# search relative to current conda environment directory
conda_env_path = os.environ.get('CONDA_DEFAULT_ENV') # path to current conda environment
if conda_env_path and os.path.isdir(conda_env_path):
conda_env_path = os.path.abspath(conda_env_path)
conda_env_path = conda_env_path.replace('\\', '/') # need unix-style path
env_loader = jinja2.FileSystemLoader(conda_env_path)
loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))
undefined_type = jinja2.StrictUndefined
if permit_undefined_jinja:
# The UndefinedNeverFail class keeps a global list of all undefined names
# Clear any leftover names from the last parse.
UndefinedNeverFail.all_undefined_names = []
undefined_type = UndefinedNeverFail
loader = FilteredLoader(jinja2.ChoiceLoader(loaders), config=self.config)
env = jinja2.Environment(loader=loader, undefined=undefined_type)
env.globals.update(ns_cfg(self.config))
env.globals.update(context_processor(self, path, config=self.config,
permit_undefined_jinja=permit_undefined_jinja))
# Future goal here. Not supporting jinja2 on replaced sections right now.
# we write a temporary file, so that we can dynamically replace sections in the meta.yaml
# file on disk. These replaced sections also need to have jinja2 filling in templates.
# The really hard part here is that we need to operate on plain text, because we need to
# keep selectors and all that.
try:
template = env.get_or_select_template(filename)
rendered = template.render(environment=env)
if permit_undefined_jinja:
self.undefined_jinja_vars = UndefinedNeverFail.all_undefined_names
else:
self.undefined_jinja_vars = []
except jinja2.TemplateError as ex:
if "'None' | |
#!/usr/bin/env python
import abc
import copy
import logging
import os
import re
import time
import requests
from yaml import dump
from yaml import load
import latency_unit
import status as st
# These are the mandatory fields that must be present in the configuration
# file, in this exact structure (section name -> required keys within it).
configuration_mandatory_fields = {
    'endpoint': ['url', 'method', 'timeout', 'expectation'],
    'cachet': ['api_url', 'token', 'component_id'],
    'frequency': []}
class ConfigurationValidationError(Exception):
    """Raised when the configuration file fails validation."""

    def __init__(self, value):
        # the offending value / validation detail, echoed by __str__
        self.value = value

    def __str__(self):
        return repr(self.value)
class ComponentNonexistentError(Exception):
    """Raised when the monitored component id is unknown to the cachet API."""

    def __init__(self, component_id):
        self.component_id = component_id

    def __str__(self):
        message = 'Component with id [%d] does not exist.' % (self.component_id,)
        return repr(message)
class MetricNonexistentError(Exception):
    """Raised when the metric id is unknown to the cachet API."""

    def __init__(self, metric_id):
        self.metric_id = metric_id

    def __str__(self):
        message = 'Metric with id [%d] does not exist.' % (self.metric_id,)
        return repr(message)
def get_current_status(endpoint_url, component_id, headers):
    """Fetch the current status of the monitored component from the cachet API.

    :return component status.
    :raises ComponentNonexistentError: when the API does not know the component
        (non-OK response).
    """
    response = requests.get('%s/components/%s' % (endpoint_url, component_id), headers=headers)
    if not response.ok:
        raise ComponentNonexistentError(component_id)
    # The component exists.
    return response.json()['data']['status']
def normalize_url(url):
    """Prefix *url* with the default http:// scheme when no http(s) scheme is present.

    The previous check only tested for a leading 'http', which wrongly
    accepted scheme-less hosts such as 'httpserver.com'; a complete
    'http://' or 'https://' prefix (case-insensitive) is now required.
    """
    if not url.lower().startswith(('http://', 'https://')):
        return 'http://%s' % url
    return url
class Configuration(object):
    """Represents a configuration file, but it also includes the functionality
    of assessing the API and pushing the results to cachet.
    """
    def __init__(self, config_file):
        self.logger = logging.getLogger('cachet_url_monitor.configuration.Configuration')
        self.config_file = config_file
        # NOTE(review): `file()` is a Python 2 builtin (use open() on Python 3)
        # and yaml.load() without an explicit Loader can execute arbitrary
        # tags -- safe only if the config file is trusted.
        self.data = load(file(self.config_file, 'r'))
        # Consecutive-failure counter consumed by if_trigger_update().
        self.current_fails = 0
        self.trigger_update = True
        # Exposing the configuration to confirm it's parsed as expected.
        self.print_out()
        # We need to validate the configuration is correct and then validate the component actually exists.
        self.validate()
        # We store the main information from the configuration file, so we don't keep reading from the data dictionary.
        # Environment variables always win over values from the config file.
        self.headers = {'X-Cachet-Token': os.environ.get('CACHET_TOKEN') or self.data['cachet']['token']}
        self.endpoint_method = os.environ.get('ENDPOINT_METHOD') or self.data['endpoint']['method']
        self.endpoint_url = os.environ.get('ENDPOINT_URL') or self.data['endpoint']['url']
        self.endpoint_url = normalize_url(self.endpoint_url)
        self.endpoint_timeout = os.environ.get('ENDPOINT_TIMEOUT') or self.data['endpoint'].get('timeout') or 1
        self.allowed_fails = os.environ.get('ALLOWED_FAILS') or self.data['endpoint'].get('allowed_fails') or 0
        self.api_url = os.environ.get('CACHET_API_URL') or self.data['cachet']['api_url']
        self.component_id = os.environ.get('CACHET_COMPONENT_ID') or self.data['cachet']['component_id']
        self.metric_id = os.environ.get('CACHET_METRIC_ID') or self.data['cachet'].get('metric_id')
        if self.metric_id is not None:
            self.default_metric_value = self.get_default_metric_value(self.metric_id)
        # The latency_unit configuration is not mandatory and we fallback to seconds, by default.
        self.latency_unit = os.environ.get('LATENCY_UNIT') or self.data['cachet'].get('latency_unit') or 's'
        # We need the current status so we monitor the status changes. This is necessary for creating incidents.
        self.status = get_current_status(self.api_url, self.component_id, self.headers)
        self.previous_status = self.status
        # Get remaining settings
        self.public_incidents = int(
            os.environ.get('CACHET_PUBLIC_INCIDENTS') or self.data['cachet']['public_incidents'])
        self.logger.info('Monitoring URL: %s %s' % (self.endpoint_method, self.endpoint_url))
        self.expectations = [Expectaction.create(expectation) for expectation in self.data['endpoint']['expectation']]
        for expectation in self.expectations:
            self.logger.info('Registered expectation: %s' % (expectation,))
    def get_default_metric_value(self, metric_id):
        """Returns default value for configured metric.

        :raises MetricNonexistentError: if the cachet API does not know the metric.
        """
        get_metric_request = requests.get('%s/metrics/%s' % (self.api_url, metric_id), headers=self.headers)
        if get_metric_request.ok:
            return get_metric_request.json()['data']['default_value']
        else:
            raise MetricNonexistentError(metric_id)
    def get_action(self):
        """Retrieves the action list from the configuration. If it's empty, returns an empty list.
        :return: The list of actions, which can be an empty list.
        """
        if self.data['cachet'].get('action') is None:
            return []
        else:
            return self.data['cachet']['action']
    def validate(self):
        """Validates the configuration by verifying the mandatory fields are
        present and in the correct format. If the validation fails, a
        ConfigurationValidationError is raised. Otherwise nothing will happen.
        """
        configuration_errors = []
        # NOTE(review): dict.iteritems() is Python 2 only (items() on Python 3).
        for key, sub_entries in configuration_mandatory_fields.iteritems():
            if key not in self.data:
                configuration_errors.append(key)
            for sub_key in sub_entries:
                if sub_key not in self.data[key]:
                    configuration_errors.append('%s.%s' % (key, sub_key))
        # The expectation entry must be a non-empty list to be usable.
        if ('endpoint' in self.data and 'expectation' in
                self.data['endpoint']):
            if (not isinstance(self.data['endpoint']['expectation'], list) or
                    (isinstance(self.data['endpoint']['expectation'], list) and
                     len(self.data['endpoint']['expectation']) == 0)):
                configuration_errors.append('endpoint.expectation')
        if len(configuration_errors) > 0:
            raise ConfigurationValidationError(
                'Config file [%s] failed validation. Missing keys: %s' % (self.config_file,
                                                                          ', '.join(configuration_errors)))
    def evaluate(self):
        """Sends the request to the URL set in the configuration and executes
        each one of the expectations, one by one. The status will be updated
        according to the expectation results.
        """
        try:
            self.request = requests.request(self.endpoint_method, self.endpoint_url, timeout=self.endpoint_timeout)
            self.current_timestamp = int(time.time())
        except requests.ConnectionError:
            self.message = 'The URL is unreachable: %s %s' % (self.endpoint_method, self.endpoint_url)
            self.logger.warning(self.message)
            self.status = st.COMPONENT_STATUS_PARTIAL_OUTAGE
            return
        except requests.HTTPError:
            self.message = 'Unexpected HTTP response'
            self.logger.exception(self.message)
            self.status = st.COMPONENT_STATUS_PARTIAL_OUTAGE
            return
        except requests.Timeout:
            self.message = 'Request timed out'
            self.logger.warning(self.message)
            self.status = st.COMPONENT_STATUS_PERFORMANCE_ISSUES
            return
        # We initially assume the API is healthy.
        self.status = st.COMPONENT_STATUS_OPERATIONAL
        self.message = ''
        for expectation in self.expectations:
            status = expectation.get_status(self.request)
            # The greater the status is, the worse the state of the API is.
            if status > self.status:
                self.status = status
                self.message = expectation.get_message(self.request)
                self.logger.info(self.message)
    def print_out(self):
        # Log the parsed configuration (token redacted by __repr__).
        self.logger.info('Current configuration:\n%s' % (self.__repr__()))
    def __repr__(self):
        temporary_data = copy.deepcopy(self.data)
        # Removing the token so we don't leak it in the logs.
        del temporary_data['cachet']['token']
        return dump(temporary_data, default_flow_style=False)
    def if_trigger_update(self):
        """
        Checks if update should be triggered - trigger it for all operational states
        and only for non-operational ones above the configured threshold (allowed_fails).
        """
        # NOTE(review): the literal 1 presumably equals
        # st.COMPONENT_STATUS_OPERATIONAL -- confirm before replacing it.
        if self.status != 1:
            self.current_fails = self.current_fails + 1
            self.logger.info('Failure #%s with threshold set to %s' % (self.current_fails, self.allowed_fails))
            if self.current_fails <= self.allowed_fails:
                self.trigger_update = False
                return
        self.current_fails = 0
        self.trigger_update = True
    def push_status(self):
        """Pushes the status of the component to the cachet server. It will update the component
        status based on the previous call to evaluate().
        """
        if self.previous_status == self.status:
            return
        # NOTE(review): previous_status is updated even when trigger_update is
        # False, so a suppressed change is never re-sent later -- confirm this
        # is intentional.
        self.previous_status = self.status
        if not self.trigger_update:
            return
        params = {'id': self.component_id, 'status': self.status}
        component_request = requests.put('%s/components/%d' % (self.api_url, self.component_id), params=params,
                                         headers=self.headers)
        if component_request.ok:
            # Successful update
            self.logger.info('Component update: status [%d]' % (self.status,))
        else:
            # Failed to update the API status
            self.logger.warning('Component update failed with status [%d]: API'
                                ' status: [%d]' % (component_request.status_code, self.status))
    def push_metrics(self):
        """Pushes the total amount of seconds the request took to get a response from the URL.
        It only will send a request if the metric id was set in the configuration.
        In case of failed connection trial pushes the default metric value.
        """
        if 'metric_id' in self.data['cachet'] and hasattr(self, 'request'):
            # We convert the elapsed time from the request, in seconds, to the configured unit.
            value = self.default_metric_value if self.status != 1 else latency_unit.convert_to_unit(self.latency_unit,
                                                                                                   self.request.elapsed.total_seconds())
            params = {'id': self.metric_id, 'value': value,
                      'timestamp': self.current_timestamp}
            metrics_request = requests.post('%s/metrics/%d/points' % (self.api_url, self.metric_id), params=params,
                                            headers=self.headers)
            if metrics_request.ok:
                # Successful metrics upload
                self.logger.info('Metric uploaded: %.6f %s' % (value, self.latency_unit))
            else:
                self.logger.warning('Metric upload failed with status [%d]' %
                                    (metrics_request.status_code,))
    def push_incident(self):
        """If the component status has changed, we create a new incident (if this is the first time it becomes unstable)
        or updates the existing incident once it becomes healthy again.
        """
        if not self.trigger_update:
            return
        if hasattr(self, 'incident_id') and self.status == st.COMPONENT_STATUS_OPERATIONAL:
            # If the incident already exists, it means it was unhealthy but now it's healthy again.
            # Incident status 4 marks the incident as fixed in cachet.
            params = {'status': 4, 'visible': self.public_incidents, 'component_id': self.component_id,
                      'component_status': self.status,
                      'notify': True}
            incident_request = requests.put('%s/incidents/%d' % (self.api_url, self.incident_id), params=params,
                                            headers=self.headers)
            if incident_request.ok:
                # Successful metrics upload
                self.logger.info(
                    'Incident updated, API healthy again: component status [%d], message: "%s"' % (
                        self.status, self.message))
                del self.incident_id
            else:
                self.logger.warning('Incident update failed with status [%d], message: "%s"' % (
                    incident_request.status_code, self.message))
        elif not hasattr(self, 'incident_id') and self.status != st.COMPONENT_STATUS_OPERATIONAL:
            # This is the first time the incident is being created.
            params = {'name': 'URL unavailable', 'message': self.message, 'status': 1, 'visible': self.public_incidents,
                      'component_id': self.component_id, 'component_status': self.status, 'notify': True}
            incident_request = requests.post('%s/incidents' % (self.api_url,), params=params, headers=self.headers)
            if incident_request.ok:
                # Successful incident upload.
                self.incident_id = incident_request.json()['data']['id']
                self.logger.info(
                    'Incident uploaded, API unhealthy: component status [%d], message: "%s"' % (
                        self.status, self.message))
            else:
                self.logger.warning(
                    'Incident upload failed with status [%d], message: "%s"' % (
                        incident_request.status_code, self.message))
class Expectaction(object):
    """Base class for URL result expectations.

    Any new expectation should extend this class and have its name added to
    the create() method.  NOTE(review): the class name is misspelled
    ("Expectaction"), but renaming it would break subclasses and callers.
    """
    @staticmethod
    def create(configuration):
        """Create the expectation instance matching ``configuration['type']``."""
        # NOTE(review): an unknown 'type' makes .get() return None, so the
        # trailing call raises TypeError instead of a descriptive error.
        expectations = {
            'HTTP_STATUS': HttpStatus,
            'LATENCY': Latency,
            'REGEX': Regex
        }
        return expectations.get(configuration['type'])(configuration)
    @abc.abstractmethod
    def get_status(self, response):
        """Returns the status of the API, following cachet's component status
        documentation: https://docs.cachethq.io/docs/component-statuses
        """
    @abc.abstractmethod
    def get_message(self, response):
        """Gets the error message."""
class HttpStatus(Expectaction):
    def __init__(self, configuration):
        # Pre-parse the expected status range (e.g. "200-300") once at setup.
        self.status_range = HttpStatus.parse_range(configuration['status_range'])
@staticmethod
def parse_range(range_string):
statuses = range_string.split("-")
if len(statuses) == 1:
# | |
# ompclib/ompclib_numpy.py
# This file is a part of OMPC (http://ompc.juricap.com/)
#
# for testing:
# import ompclib_numpy; reload(ompclib_numpy); from ompclib_numpy import *
# TODO
# - remove all references to array, use "ompc_base._init_data" instead
import sys, os; sys.path.append(os.path.abspath('..'))
from itertools import izip as _izip, cycle as _cycle, repeat as _repeat
from ompc import _get_narginout
import os, sys
import numpy as np
import pylab as mpl
# Functions that are to be exported have to be listed in the __ompc_all__
# array.
# This decorator adds a function to the "toolbox-less" OMPC base library.
# Seed export list with core names; _ompc_base() appends decorated functions.
__ompc_all__ = ['end', 'mslice', 'mstring', 'OMPCSEMI',
                'OMPCException', 'elmul', 'elpow', 'eldiv', 'ldiv', 'elldiv']
def _ompc_base(func):
    """Decorator: register *func* in the module export list and return it."""
    global __ompc_all__
    # Mutate the shared list in place (equivalent to the += idiom).
    __ompc_all__.append(func.__name__)
    return func
# Sentinels for the MATLAB emulation: OMPCSEMI stands in for ';' inside
# mslice expressions, and `end` has no numeric value until indexing resolves.
OMPCSEMI = Ellipsis
OMPCEND = None
end = OMPCEND
# MATLAB class name -> numpy dtype string used to allocate backing arrays.
_dtype2numpy = {'complex': 'complex128',
                'double': 'f8', 'single': 'f4',
                'int32': 'i4', 'uint32': 'u4',
                'int16': 'i2', 'uint16': 'u2',
                'int8': 'i1', 'uint8': 'u1',
                'char': 'u1',
                'bool': 'bool',
                }
# Reverse map: accept a numpy dtype object, its str() form, or the raw dtype
# code as keys, all resolving back to the MATLAB class name.
_numpy2dtype = {}
for k, v in _dtype2numpy.items():
    _numpy2dtype[np.dtype(v)] = k
    _numpy2dtype[str(np.dtype(v))] = k
    _numpy2dtype[v] = k
# errors and warnings
class OMPCException(Exception):
    """Error type raised by emulated MATLAB code (see the error() builtin)."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return '%r' % (self.msg,)
@_ompc_base
def error(x):
    # MATLAB `error`: abort the emulated program by raising an OMPCException.
    raise OMPCException(x)
class mvar(object):
    """Common base for MATLAB-like array objects (dtype + shape + numpy data)."""
    @staticmethod
    def _DataObject(dtype, data):
        # Allocate the numpy backing store for a given MATLAB dtype.
        return np.array(data, dtype=_dtype2numpy[dtype])
    def __new__(cls, *args, **kwargs):
        a = super(mvar, cls).__new__(cls, *args, **kwargs)
        # Defaults: no data yet, MATLAB's default class 'double', empty shape.
        a._a = None
        a.dtype = 'double'
        a.msize = (0, 0)
        return a
    def _init_data(self, dtype, msize, data):
        # Late initializer used instead of __init__ by the array factories.
        self.dtype = dtype
        self.msize = msize
        self._a = self._DataObject(dtype, data)
    def __call__(self, *i):
        # MATLAB-style A(i, j) indexing: returns a view that remembers its
        # origin so it can later be assigned through .lvalue.
        mview = self.__getitem1__(i)
        mview.__ompc_view__ = _mview(self, i, False)
        return mview
    def _ctypes_get(self):
        return self._a.ctypes
    ctypes = property(_ctypes_get, None, None,
                      "Ctypes-wrapped data object.")
    def _lvalue_set(self, val):
        # Write back through the recorded view into the viewed array.
        assert hasattr(self, '__ompc_view__')
        o = self.__ompc_view__
        # FIXME: o.linear
        o.viewed.__setitem1__(o.ins, val)
    lvalue = property(None, _lvalue_set, None, "")
    def __copy__(self):
        return _marray(self.dtype, self.msize, self._a.copy())
    def __deepcopy__(self):
        return _marray(self.dtype, self.msize, self._a.copy())
    def __base0__(self, shp=None):
        # Subclasses that can serve as indices override this.
        raise OMPCException(
            'Class "%s" cannot be used as index!'%self.__class__)
    # FIXME: warn people about using numpy functions directly
    def __array__(self):
        # NOTE(review): Python 2 print statements; deliberate trap to stop
        # numpy from silently consuming mvar objects.
        print 'in __array__', repr(self)
        print 'in __array__', self
        raise NotImplementedError("At the moment using numpy functions " \
            "directly is not possible! Please read the documentation at " \
            "http://ompc.juricap.com/documentation/.")
    def __nonzero__(self):
        # Truth value mirrors MATLAB: true iff any element is non-zero.
        return bool(np.any(self._a != 0))
class _mview(mvar):
    """Lightweight record of a view (index expression) into another mvar."""

    def __init__(self, viewed, ins, linear):
        # Source array, the 1-based index spec, and whether indexing was
        # linear (single subscript) rather than per-dimension.
        self.viewed = viewed
        self.ins = ins
        self.linear = linear

    def __repr__(self):
        return "_mview(%r, %r, %r)" % (self.viewed, self.ins, self.linear)

    def __str__(self):
        return "<view of %r>" % (self.viewed,)
def _dsize(dtype):
    # Element size lookup; _dsize_dict is expected to be defined elsewhere
    # in this module (not visible in this chunk).
    return _dsize_dict[dtype]
def _flatten(seq):
    """Depth-first generator over the leaf (scalar) elements of nested sequences."""
    for elem in seq:
        if _isscalar(elem) and not hasattr(elem, '__len__'):
            yield elem
        else:
            # Sized or non-scalar element: recurse into its contents.
            # (NOTE(review): a bare string element would recurse forever,
            # since _isscalar() rejects strings -- confirm callers never
            # pass strings here.)
            for leaf in _flatten(elem):
                yield leaf
def _ndi(*i):
    """Returns a generator of tuples that iterate over elements specified
    by slices and indices in `i`."""
    # NOTE(review): Python 2 only -- itertools.izip was removed in Python 3.
    from itertools import chain, repeat, cycle, izip
    # Expand a slice into its explicit index list (step defaults to 1).
    r = lambda x: range(x.start, x.stop, x.step is None and 1 or x.step)
    res = []
    for x in i:
        if isinstance(x, slice): res.append(r(x))
        elif _isscalar(x): res.append([x])
        else: res.append(x)
    i = res
    # Build one cycling generator per dimension so the leftmost index varies
    # fastest (column-major order, as in MATLAB).
    cp = 1
    gs = []
    for x in i[:-1]:
        gs += [ cycle(chain(*(repeat(j,cp) for j in x))) ]
        cp *= len(x)
    gs += [ chain(*(repeat(j,cp) for j in i[-1])) ]
    return izip(*gs)
def _isscalar(A):
if isinstance(A, str):
return False
elif hasattr(A, '__len__') and len(A) > 1:
return False
elif hasattr(A, '__getitem__'):
try: A[1]
except: return True
else: return False
elif hasattr(A, '__iter__'):
return False
# doesn't have length nor multiple elements and doesn't support iteration
return True
def _typegreater_(Adt, Bdt):
    """Returns type with higher precision."""
    # Accepts marrays as well as dtype names; compares by element byte size.
    if isinstance(Adt, _marray): Adt = Adt.dtype
    if isinstance(Bdt, _marray): Bdt = Bdt.dtype
    return _dsize_dict[Adt] >= _dsize_dict[Bdt] and Adt or Bdt
def _typegreater(Adt, Bdt):
    """Returns type with higher precision."""
    # Python 2-era `and/or` ternary: picks Adt when its element size wins.
    return _dsize_dict[Adt] >= _dsize_dict[Bdt] and Adt or Bdt
def _dtype(X):
# from operator import isSequenceType
# while isSequenceType(X):
# X = X[0]
# res = tuple(reversed(shp))
# FIXME: return
if isinstance(X, str):
return 'char'
return 'double'
def _size(X, d=None):
    """Return the MATLAB-style size of X: the whole tuple, or dimension `d`."""
    if isinstance(X, _marray):
        res = X.msize
    elif _isscalar(X):
        return (1, 1)
    else:
        # NOTE(review): Python 2 only -- operator.isSequenceType is gone in
        # Python 3.
        from operator import isSequenceType
        # Walk down nested sequences, collecting each level's length.
        shp = []
        while isSequenceType(X):
            shp.append(len(X))
            X = X[0]
        res = tuple(reversed(shp))
    # minimum shape is 2 dimensional
    if len(res) == 1:
        res = (1, res[0])
    if d is None:
        return res
    else:
        return res[d]
def _ndshape(msize, *i):
    """Determine the shape of a view on A with slicing specified in `i`.
    """
    shp = []
    for idim, x in enumerate(i):
        if isinstance(x, slice):
            # Fill in slice defaults from the dimension extent.
            # NOTE(review): sys.maxint is Python 2 only (use sys.maxsize).
            start, stop, step = x.start, x.stop, x.step
            if x.start is None: start = 0
            if x.stop == sys.maxint or x.stop is None: stop = msize[idim]
            if x.step is None: step = 1
            shp.append( len(range(start,stop,step)) )
        elif _isscalar(x):
            shp.append(1)
        elif hasattr(x, '__len__'):
            shp.append(len(x))
        else:
            raise NotImplementedError()
    # Pad to at least two dimensions (MATLAB arrays are never 1-D).
    if len(shp) == 1: shp[:0] = [1]
    return shp
def _ndshape1(msize, *i):
    """Determine shape of a view on size msize with slicing specified in `i`.

    The original body had a duplicated ``else:`` branch (a syntax error) and
    an unreachable ``shp.append(mrange(x))`` after a raise; the branch chain
    is reconstructed here as a valid elif/else ladder with the same reachable
    behavior.
    """
    shp = []
    for idim, x in enumerate(i):
        if isinstance(x, _mslice):
            if x.hasnoend():
                # Open-ended mslice: extent runs to the end of this dimension.
                shp.append( len(mslice[x.start:x.step:msize[idim]]) )
            else:
                shp.append( len(x) )
        elif _isscalar(x):
            shp.append(1)
        elif hasattr(x, '__len__'):
            shp.append(len(x))
        elif isinstance(x, slice):
            # Plain Python slices are not valid 1-based index specifiers here.
            raise NotImplementedError()
        else:
            raise NotImplementedError()
    #if len(shp) == 1: shp[:0] = [1]
    if len(shp) == 1:
        # Single index: row result for row vectors, column result otherwise.
        if msize[0] == 1: shp[:0] = [1]
        else: shp.append(1)
    return shp
@_ompc_base
def isempty(A):
    # MATLAB `isempty`: true iff any dimension of A is zero.
    return np.prod(A.msize) == 0
###################### base mfunctions
@_ompc_base
def plus(A, B):
    # Element-wise addition (MATLAB `+`); unwrap mvars to raw numpy arrays.
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A+B
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def minus(A, B):
    # Element-wise subtraction (MATLAB `-`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A-B
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def uminus(A):
    # Unary minus (MATLAB `-A`).
    if isinstance(A, mvar): A = A._a
    na = -A
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def times(A, B):
    # Element-wise multiplication (MATLAB `.*`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A*B
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def mtimes(A, B):
    # Matrix multiplication (MATLAB `*`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    # the arrays are stored transposed
    na = np.dot(B, A)
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def power(A, B):
    # Element-wise power (MATLAB `.^`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A**B
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
try:
    from numpy.linalg import matrix_power
except ImportError:  # very old numpy: provide a local fallback
    def matrix_power(M, n):
        """Raise the square matrix M to the (possibly negative) integer power n."""
        if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
            raise ValueError("input must be a square array")
        # The original tested `issubdtype(type(n), int)` with an undefined
        # name `issubdtype`; check for an integer exponent directly.
        if not isinstance(n, (int, np.integer)):
            raise TypeError("exponent must be an integer")
        from numpy.linalg import inv
        if n == 0:
            # M^0 is the identity with M's shape and dtype.
            M = M.copy()
            M[:] = np.identity(M.shape[0])
            return M
        elif n < 0:
            M = inv(M)
            n *= -1
        result = M
        if n <= 3:
            # Small powers: plain repeated multiplication is cheapest.
            for _ in range(n - 1):
                result = np.dot(result, M)
            return result
        # binary decomposition to reduce the number of Matrix
        # multiplications for n > 3.
        beta = np.binary_repr(n)
        Z, q, t = M, 0, len(beta)
        while beta[t - q - 1] == '0':
            Z = np.dot(Z, Z)
            q += 1
        result = Z
        for k in range(q + 1, t):
            Z = np.dot(Z, Z)
            if beta[t - k - 1] == '1':
                result = np.dot(result, Z)
        return result
@_ompc_base
def mpower(A, B):
    """Matrix power (MATLAB ``A^B``) for 2-D marrays with integer exponent."""
    if len(A.msize) != 2:
        raise OMPCException('??? Error using ==> mpower\n'
                            'marray must be 2-D')
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    if isinstance(B, float):
        # The original tested `np.around(him)` where `him` is an undefined
        # name (NameError); the exponent B is what must be whole-valued.
        if np.around(B) != B: raise NotImplementedError()
        else: B = int(B)
    # Arrays are stored transposed relative to MATLAB order.
    na = matrix_power(A.T, B)
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
_solve = np.linalg.solve
@_ompc_base
def mldivide(A, B):
    """MATLAB left division ``A\\B`` (solve A*x = B); square A only."""
    # FIXME A, B have to be matrices
    if A.msize[0] == A.msize[1]:
        if isinstance(A, mvar): A = A._a
        if isinstance(B, mvar): B = B._a
        na = _solve(A, B)
        msize = na.shape[::-1]
        if len(msize) == 1: msize = (msize[0], 1)
        # Was `_numpy2dtype(na.dtype)` -- _numpy2dtype is a dict, so calling
        # it raised TypeError; subscript it like every sibling wrapper does.
        return _marray(_numpy2dtype[na.dtype], msize, na.T)
    else:
        # Rectangular systems (least squares) are not implemented.
        raise NotImplementedError()
@_ompc_base
def mrdivide(A, B):
    "A/B = (B.T\A.T).T"
    # Right division expressed through left division via transposition.
    return mldivide(B.T, A.T).T
    # raise NotImplementedError()
@_ompc_base
def ldivide(A, B):
    # Element-wise left division (MATLAB `.\`) is rdivide with swapped args.
    return rdivide(B, A)
@_ompc_base
def rdivide(A, B):
    # Element-wise right division (MATLAB `./`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A / B
    return _marray(_numpy2dtype[na.dtype], na.shape[::-1], na)
@_ompc_base
def eq(A, B):
    # Element-wise equality (MATLAB `==`); result is a logical marray.
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A == B
    return _marray('bool', na.shape[::-1], na)
@_ompc_base
def ne(A, B):
    # Element-wise inequality (MATLAB `~=`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A != B
    return _marray('bool', na.shape[::-1], na)
@_ompc_base
def lt(A, B):
    # Element-wise less-than (MATLAB `<`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A < B
    return _marray('bool', na.shape[::-1], na)
@_ompc_base
def gt(A, B):
    # Element-wise greater-than (MATLAB `>`).
    if isinstance(A, mvar): A = A._a
    if isinstance(B, mvar): B = B._a
    na = A > B
    return _marray('bool', na.shape[::-1], na)
@_ompc_base
def le(A, B):
if isinstance(A, mvar): | |
# TrustPayments/python-sdk
# coding: utf-8
import pprint
import six
from enum import Enum
from . import TransactionAwareEntity
class TransactionInvoice(TransactionAwareEntity):
swagger_types = {
'amount': 'float',
'billing_address': 'Address',
'completion': 'TransactionCompletion',
'created_on': 'datetime',
'derecognized_by': 'int',
'derecognized_on': 'datetime',
'due_on': 'datetime',
'environment': 'Environment',
'external_id': 'str',
'language': 'str',
'line_items': 'list[LineItem]',
'merchant_reference': 'str',
'outstanding_amount': 'float',
'paid_on': 'datetime',
'planned_purge_date': 'datetime',
'space_view_id': 'int',
'state': 'TransactionInvoiceState',
'tax_amount': 'float',
'time_zone': 'str',
'version': 'int',
}
attribute_map = {
'amount': 'amount','billing_address': 'billingAddress','completion': 'completion','created_on': 'createdOn','derecognized_by': 'derecognizedBy','derecognized_on': 'derecognizedOn','due_on': 'dueOn','environment': 'environment','external_id': 'externalId','language': 'language','line_items': 'lineItems','merchant_reference': 'merchantReference','outstanding_amount': 'outstandingAmount','paid_on': 'paidOn','planned_purge_date': 'plannedPurgeDate','space_view_id': 'spaceViewId','state': 'state','tax_amount': 'taxAmount','time_zone': 'timeZone','version': 'version',
}
_amount = None
_billing_address = None
_completion = None
_created_on = None
_derecognized_by = None
_derecognized_on = None
_due_on = None
_environment = None
_external_id = None
_language = None
_line_items = None
_merchant_reference = None
_outstanding_amount = None
_paid_on = None
_planned_purge_date = None
_space_view_id = None
_state = None
_tax_amount = None
_time_zone = None
_version = None
    def __init__(self, **kwargs):
        """Populate the invoice from keyword arguments (generated model).

        Each assignment goes through the corresponding property setter, which
        applies the API schema's validation (e.g. length limits).
        """
        self.discriminator = None
        self.amount = kwargs.get('amount', None)
        self.billing_address = kwargs.get('billing_address', None)
        self.completion = kwargs.get('completion', None)
        self.created_on = kwargs.get('created_on', None)
        self.derecognized_by = kwargs.get('derecognized_by', None)
        self.derecognized_on = kwargs.get('derecognized_on', None)
        self.due_on = kwargs.get('due_on', None)
        self.environment = kwargs.get('environment', None)
        self.external_id = kwargs.get('external_id', None)
        self.language = kwargs.get('language', None)
        self.line_items = kwargs.get('line_items', None)
        self.merchant_reference = kwargs.get('merchant_reference', None)
        self.outstanding_amount = kwargs.get('outstanding_amount', None)
        self.paid_on = kwargs.get('paid_on', None)
        self.planned_purge_date = kwargs.get('planned_purge_date', None)
        self.space_view_id = kwargs.get('space_view_id', None)
        self.state = kwargs.get('state', None)
        self.tax_amount = kwargs.get('tax_amount', None)
        self.time_zone = kwargs.get('time_zone', None)
        self.version = kwargs.get('version', None)
        super().__init__(**kwargs)
        # NOTE(review): these update the *class-level* dicts in place, merging
        # the parent's entries into mappings shared by all instances --
        # presumably intended by the generator, but worth confirming.
        self.swagger_types.update(super().swagger_types)
        self.attribute_map.update(super().attribute_map)
    # --- generated accessors: amount, billing_address, completion ---
    @property
    def amount(self):
        """Gets the amount of this TransactionInvoice.
        :return: The amount of this TransactionInvoice.
        :rtype: float
        """
        return self._amount

    @amount.setter
    def amount(self, amount):
        """Sets the amount of this TransactionInvoice.
        :param amount: The amount of this TransactionInvoice.
        :type: float
        """
        self._amount = amount

    @property
    def billing_address(self):
        """Gets the billing_address of this TransactionInvoice.
        :return: The billing_address of this TransactionInvoice.
        :rtype: Address
        """
        return self._billing_address

    @billing_address.setter
    def billing_address(self, billing_address):
        """Sets the billing_address of this TransactionInvoice.
        :param billing_address: The billing_address of this TransactionInvoice.
        :type: Address
        """
        self._billing_address = billing_address

    @property
    def completion(self):
        """Gets the completion of this TransactionInvoice.
        :return: The completion of this TransactionInvoice.
        :rtype: TransactionCompletion
        """
        return self._completion

    @completion.setter
    def completion(self, completion):
        """Sets the completion of this TransactionInvoice.
        :param completion: The completion of this TransactionInvoice.
        :type: TransactionCompletion
        """
        self._completion = completion
    # --- generated accessors: created_on, derecognized_by, derecognized_on ---
    @property
    def created_on(self):
        """Gets the created_on of this TransactionInvoice.
        The date on which the invoice is created on.
        :return: The created_on of this TransactionInvoice.
        :rtype: datetime
        """
        return self._created_on

    @created_on.setter
    def created_on(self, created_on):
        """Sets the created_on of this TransactionInvoice.
        The date on which the invoice is created on.
        :param created_on: The created_on of this TransactionInvoice.
        :type: datetime
        """
        self._created_on = created_on

    @property
    def derecognized_by(self):
        """Gets the derecognized_by of this TransactionInvoice.
        The id of the user which marked the invoice as derecognized.
        :return: The derecognized_by of this TransactionInvoice.
        :rtype: int
        """
        return self._derecognized_by

    @derecognized_by.setter
    def derecognized_by(self, derecognized_by):
        """Sets the derecognized_by of this TransactionInvoice.
        The id of the user which marked the invoice as derecognized.
        :param derecognized_by: The derecognized_by of this TransactionInvoice.
        :type: int
        """
        self._derecognized_by = derecognized_by

    @property
    def derecognized_on(self):
        """Gets the derecognized_on of this TransactionInvoice.
        The date on which the invoice is marked as derecognized.
        :return: The derecognized_on of this TransactionInvoice.
        :rtype: datetime
        """
        return self._derecognized_on

    @derecognized_on.setter
    def derecognized_on(self, derecognized_on):
        """Sets the derecognized_on of this TransactionInvoice.
        The date on which the invoice is marked as derecognized.
        :param derecognized_on: The derecognized_on of this TransactionInvoice.
        :type: datetime
        """
        self._derecognized_on = derecognized_on
    # --- generated accessors: due_on, environment, external_id ---
    @property
    def due_on(self):
        """Gets the due_on of this TransactionInvoice.
        The date on which the invoice should be paid on.
        :return: The due_on of this TransactionInvoice.
        :rtype: datetime
        """
        return self._due_on

    @due_on.setter
    def due_on(self, due_on):
        """Sets the due_on of this TransactionInvoice.
        The date on which the invoice should be paid on.
        :param due_on: The due_on of this TransactionInvoice.
        :type: datetime
        """
        self._due_on = due_on

    @property
    def environment(self):
        """Gets the environment of this TransactionInvoice.
        :return: The environment of this TransactionInvoice.
        :rtype: Environment
        """
        return self._environment

    @environment.setter
    def environment(self, environment):
        """Sets the environment of this TransactionInvoice.
        :param environment: The environment of this TransactionInvoice.
        :type: Environment
        """
        self._environment = environment

    @property
    def external_id(self):
        """Gets the external_id of this TransactionInvoice.
        The external id helps to identify the entity and a subsequent creation of an entity with the same ID will not create a new entity.
        :return: The external_id of this TransactionInvoice.
        :rtype: str
        """
        return self._external_id

    @external_id.setter
    def external_id(self, external_id):
        """Sets the external_id of this TransactionInvoice.
        The external id helps to identify the entity and a subsequent creation of an entity with the same ID will not create a new entity.
        :param external_id: The external_id of this TransactionInvoice.
        :type: str
        """
        # Length bounds (1..100) come from the API schema for this field.
        if external_id is not None and len(external_id) > 100:
            raise ValueError("Invalid value for `external_id`, length must be less than or equal to `100`")
        if external_id is not None and len(external_id) < 1:
            raise ValueError("Invalid value for `external_id`, length must be greater than or equal to `1`")
        self._external_id = external_id
    # --- generated accessors: language, line_items, merchant_reference ---
    @property
    def language(self):
        """Gets the language of this TransactionInvoice.
        :return: The language of this TransactionInvoice.
        :rtype: str
        """
        return self._language

    @language.setter
    def language(self, language):
        """Sets the language of this TransactionInvoice.
        :param language: The language of this TransactionInvoice.
        :type: str
        """
        self._language = language

    @property
    def line_items(self):
        """Gets the line_items of this TransactionInvoice.
        :return: The line_items of this TransactionInvoice.
        :rtype: list[LineItem]
        """
        return self._line_items

    @line_items.setter
    def line_items(self, line_items):
        """Sets the line_items of this TransactionInvoice.
        :param line_items: The line_items of this TransactionInvoice.
        :type: list[LineItem]
        """
        self._line_items = line_items

    @property
    def merchant_reference(self):
        """Gets the merchant_reference of this TransactionInvoice.
        :return: The merchant_reference of this TransactionInvoice.
        :rtype: str
        """
        return self._merchant_reference

    @merchant_reference.setter
    def merchant_reference(self, merchant_reference):
        """Sets the merchant_reference of this TransactionInvoice.
        :param merchant_reference: The merchant_reference of this TransactionInvoice.
        :type: str
        """
        # Max length (100) comes from the API schema for this field.
        if merchant_reference is not None and len(merchant_reference) > 100:
            raise ValueError("Invalid value for `merchant_reference`, length must be less than or equal to `100`")
        self._merchant_reference = merchant_reference
    # --- generated accessors: outstanding_amount, paid_on,
    # planned_purge_date, space_view_id ---
    @property
    def outstanding_amount(self):
        """Gets the outstanding_amount of this TransactionInvoice.
        The outstanding amount indicates how much the buyer owes the merchant. A negative amount indicates that the invoice is overpaid.
        :return: The outstanding_amount of this TransactionInvoice.
        :rtype: float
        """
        return self._outstanding_amount

    @outstanding_amount.setter
    def outstanding_amount(self, outstanding_amount):
        """Sets the outstanding_amount of this TransactionInvoice.
        The outstanding amount indicates how much the buyer owes the merchant. A negative amount indicates that the invoice is overpaid.
        :param outstanding_amount: The outstanding_amount of this TransactionInvoice.
        :type: float
        """
        self._outstanding_amount = outstanding_amount

    @property
    def paid_on(self):
        """Gets the paid_on of this TransactionInvoice.
        The date on which the invoice is marked as paid. Eventually this date lags behind of the actual paid date.
        :return: The paid_on of this TransactionInvoice.
        :rtype: datetime
        """
        return self._paid_on

    @paid_on.setter
    def paid_on(self, paid_on):
        """Sets the paid_on of this TransactionInvoice.
        The date on which the invoice is marked as paid. Eventually this date lags behind of the actual paid date.
        :param paid_on: The paid_on of this TransactionInvoice.
        :type: datetime
        """
        self._paid_on = paid_on

    @property
    def planned_purge_date(self):
        """Gets the planned_purge_date of this TransactionInvoice.
        The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
        :return: The planned_purge_date of this TransactionInvoice.
        :rtype: datetime
        """
        return self._planned_purge_date

    @planned_purge_date.setter
    def planned_purge_date(self, planned_purge_date):
        """Sets the planned_purge_date of this TransactionInvoice.
        The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
        :param planned_purge_date: The planned_purge_date of this TransactionInvoice.
        :type: datetime
        """
        self._planned_purge_date = planned_purge_date

    @property
    def space_view_id(self):
        """Gets the space_view_id of this TransactionInvoice.
        :return: The space_view_id of this TransactionInvoice.
        :rtype: int
        """
        return self._space_view_id

    @space_view_id.setter
    def space_view_id(self, space_view_id):
        """Sets the space_view_id of this TransactionInvoice.
        :param space_view_id: The space_view_id of this TransactionInvoice.
        :type: int
        """
        self._space_view_id = space_view_id
| |
# al-fontes-jr/bardolph
import copy
import logging
from bardolph.controller import units
from bardolph.controller.get_key import getch
from bardolph.controller.i_controller import LightSet
from bardolph.controller.units import UnitMode
from bardolph.lib.i_lib import Clock, TimePattern
from bardolph.lib.injection import inject, injected, provide
from bardolph.lib.symbol import Symbol
from .call_stack import CallStack
from .loader import Loader
from .vm_codes import JumpCondition, LoopVar, OpCode, Operand, Register, SetOp
from .vm_discover import VmDiscover
from .vm_io import VmIo
from .vm_math import VmMath
class Registers:
    """Register file for the virtual machine.

    Holds the color components for both HSBK and RGB unit modes, the zone
    bounds for multizone lights, timing values, and bookkeeping registers
    (pc, result, operand, unit_mode).
    """
    def __init__(self):
        self.blue = 0.0
        self.brightness = 0.0
        self.disc_forward = False
        self.duration = 0.0
        self.first_zone = 0
        self.green = 0.0
        self.hue = 0.0
        self.kelvin = 0.0
        self.last_zone = 0
        self.name = None
        self.operand = Operand.NULL
        self.pc = 0
        self.power = False
        self.red = 0.0
        self.result = None
        self.saturation = 0.0
        self.time = 0.0  # ms.
        self.unit_mode = UnitMode.LOGICAL

    def get_color(self):
        """Return the current color as a 4-element list for the active mode."""
        if self.unit_mode is UnitMode.RGB:
            return [self.red, self.green, self.blue, self.kelvin]
        return [self.hue, self.saturation, self.brightness, self.kelvin]

    def store_color(self, color) -> None:
        """Unpack a 4-component color into the mode-appropriate registers."""
        if self.unit_mode is UnitMode.RGB:
            self.red, self.green, self.blue, self.kelvin = color
        else:
            self.hue, self.saturation, self.brightness, self.kelvin = color

    def get_by_enum(self, reg):
        """Read the register named by a Register enum member."""
        return getattr(self, reg.name.lower())

    def set_by_enum(self, reg, value):
        """Write the register named by a Register enum member."""
        setattr(self, reg.name.lower(), value)

    def reset(self):
        """Restore every register to its initial value."""
        self.__init__()

    def get_power(self):
        """Return the raw power level: 65535 when on, 0 when off."""
        return 0 if not self.power else 65535
class MachineState:
    """Snapshot handle pairing the VM's registers with its call stack.

    Holds references (not copies), so callers observe live machine state.
    """
    def __init__(self, reg, call_stack):
        self.reg = reg
        self.call_stack = call_stack
class Machine:
    def __init__(self):
        """Wire up the VM: registers, call stack, helper engines, and the
        op-code dispatch table."""
        self._cue_time = 0
        self._clock = provide(Clock)
        self._routines = {}
        self._program = []
        self._reg = Registers()
        self._call_stack = CallStack()
        # The helper engines all share the same registers and call stack.
        self._vm_io = VmIo(self._call_stack, self._reg)
        self._vm_math = VmMath(self._call_stack, self._reg)
        self._vm_discover = VmDiscover(self._call_stack, self._reg)
        self._enable_pause = True
        self._keep_running = True
        # Build the dispatch table by mapping each OpCode member to the
        # same-named private method (e.g. OpCode.MOVE -> self._move).
        # STOP and ROUTINE are excluded from that mapping; STOP is then
        # wired explicitly to the public stop() method.
        excluded = (OpCode.STOP, OpCode.ROUTINE)
        op_codes = [code for code in OpCode
                    if not str(code.name).startswith('_')
                    and code not in excluded]
        self._fn_table = {
            op_code: getattr(self, '_' + op_code.name.lower())
            for op_code in (op_codes)}
        self._fn_table[OpCode.STOP] = self.stop
def reset(self) -> None:
self._reg.reset()
self._routines.clear()
self._cue_time = 0
self._call_stack.reset()
self._vm_math.reset()
self._keep_running = True
self._enable_pause = True
    def run(self, program) -> None:
        """Load a compiled program and execute it until completion.

        Execution ends at a STOP instruction, when the pc runs past the end
        of the program, or when stop() clears _keep_running. Any exception
        raised by an instruction handler is logged and halts the machine.
        """
        loader = Loader()
        loader.load(program, self._routines)
        self._program = loader.code
        self._keep_running = True
        logging.debug('Starting to execute.')
        self._clock.start()
        program_len = len(self._program)
        try:
            while self._keep_running and self._reg.pc < program_len:
                inst = self._program[self._reg.pc]
                if inst.op_code == OpCode.STOP:
                    break
                self._fn_table[inst.op_code]()
                # Control-flow ops (END, JSR, JUMP) set the pc themselves;
                # every other op advances sequentially.
                if inst.op_code not in (OpCode.END, OpCode.JSR, OpCode.JUMP):
                    self._reg.pc += 1
            self._clock.stop()
            logging.debug(
                'Stopped, _keep_running = {}, _pc = {}, program_len = {}'
                .format(
                    self._keep_running, self._reg.pc, program_len))
        except Exception as ex:
            # NOTE(review): the clock is not stopped on this error path —
            # confirm whether _clock.stop() belongs in a finally block.
            logging.error("Machine stopped due to {}".format(ex))
    def stop(self) -> None:
        """Halt execution: clear the run flag and stop the clock."""
        self._keep_running = False
        self._clock.stop()
    def get_state(self) -> MachineState:
        """Expose the live registers and call stack for inspection."""
        return MachineState(self._reg, self._call_stack)
    def color_to_reg(self, color) -> None:
        """Unpack a 4-component color into the registers matching the
        current unit mode (HSBK for RAW/LOGICAL, RGBK otherwise)."""
        reg = self._reg
        if reg.unit_mode in (UnitMode.RAW, UnitMode.LOGICAL):
            reg.hue, reg.saturation, reg.brightness, reg.kelvin = color
        else:
            reg.red, reg.green, reg.blue, reg.kelvin = color
    def color_from_reg(self):
        """Return the current color registers as a 4-element list."""
        return self._reg.get_color()
    def get_variable(self, name):
        # Variable lookup is delegated to the active call-stack frame.
        return self._call_stack.get_variable(name)
    @property
    def current_inst(self):
        # The instruction at the current program counter.
        return self._program[self._reg.pc]
def _color(self) -> None:
fn_map = {operand: fn for operand, fn in (
(Operand.ALL, self._color_all),
(Operand.LIGHT, self._color_light),
(Operand.GROUP, self._color_group),
(Operand.LOCATION, self._color_location),
(Operand.MZ_LIGHT, self._color_mz_light))}
fn_map[self._reg.operand]()
    @inject(LightSet)
    def _color_all(self, light_set=injected) -> None:
        """Apply the color registers to every known light."""
        color = self._assure_raw_color(self._reg.get_color())
        duration = self._assure_raw_time(self._reg.duration)
        light_set.set_color(color, duration)
    @inject(LightSet)
    def _color_light(self, light_set=injected) -> None:
        """Apply the color registers to the single light named in the name
        register; report the name if the light is unknown."""
        light = light_set.get_light(self._reg.name)
        if light is None:
            Machine._report_missing(self._reg.name)
        else:
            light.set_color(
                self._assure_raw_color(self._reg.get_color()),
                self._assure_raw_time(self._reg.duration))
@inject(LightSet)
def _color_mz_light(self, light_set=injected) -> None:
light = light_set.get_light(self._reg.name)
if light is None:
Machine._report_missing(self._reg.name)
elif self._zone_check(light):
# Unknown why this happens.
if not hasattr(light, 'set_zone_color'):
logging.error(
'No set_zone_color for light of type', type(light))
else:
start_index = self._reg.first_zone
end_index = self._reg.last_zone
if end_index is None:
end_index = start_index
light.set_zone_color(
start_index, end_index + 1,
self._assure_raw_color(self._reg.get_color()),
self._assure_raw_time(self._reg.duration))
    @inject(LightSet)
    def _color_group(self, light_set=injected) -> None:
        """Apply the color registers to all lights in the named group."""
        light_names = light_set.get_group(self._reg.name)
        if light_names is None:
            logging.warning("Unknown group: {}".format(self._reg.name))
        else:
            self._color_multiple(
                [light_set.get_light(name) for name in light_names])
    @inject(LightSet)
    def _color_location(self, light_set=injected) -> None:
        """Apply the color registers to all lights at the named location."""
        light_names = light_set.get_location(self._reg.name)
        if light_names is None:
            logging.warning("Unknown location: {}".format(self._reg.name))
        else:
            self._color_multiple(
                [light_set.get_light(name) for name in light_names])
    def _color_multiple(self, lights) -> None:
        # Convert color and duration once, then fan out to each light.
        color = self._assure_raw_color(self._reg.get_color())
        duration = self._assure_raw_time(self._reg.duration)
        for light in lights:
            light.set_color(color, duration)
def _power(self) -> None: {
Operand.ALL: self._power_all,
Operand.LIGHT: self._power_light,
Operand.GROUP: self._power_group,
Operand.LOCATION: self._power_location
}[self._reg.operand]()
    @inject(LightSet)
    def _power_all(self, light_set=injected) -> None:
        """Set power on every known light."""
        duration = self._assure_raw_time(self._reg.duration)
        light_set.set_power(self._reg.get_power(), duration)
    @inject(LightSet)
    def _power_light(self, light_set=injected) -> None:
        """Set power on the single light named in the name register;
        report the name if the light is unknown."""
        light = light_set.get_light(self._reg.name)
        if light is None:
            Machine._report_missing(self._reg.name)
        else:
            duration = self._assure_raw_time(self._reg.duration)
            light.set_power(self._reg.get_power(), duration)
    @inject(LightSet)
    def _power_group(self, light_set=injected) -> None:
        """Set power on all lights in the named group."""
        light_names = light_set.get_group(self._reg.name)
        if light_names is None:
            logging.warning(
                'Power invoked for unknown group "{}"'.format(self._reg.name))
        else:
            self._power_multiple(
                [light_set.get_light(name) for name in light_names])
    @inject(LightSet)
    def _power_location(self, light_set=injected) -> None:
        """Set power on all lights at the named location."""
        light_names = light_set.get_location(self._reg.name)
        if light_names is None:
            logging.warning(
                "Power invoked for unknown location: {}".format(self._reg.name))
        else:
            self._power_multiple(
                [light_set.get_light(name) for name in light_names])
def _power_multiple(self, lights) -> None:
power = self._reg.get_power()
for light in lights:
light.set_power(power, self._reg.duration)
    @inject(LightSet)
    def _get_color(self, light_set=injected) -> None:
        """Read a light's current color into the color registers.

        For a multizone light (operand MZ_LIGHT) the color of the single
        zone indexed by first_zone is read; otherwise the whole light's
        color. Values are converted from raw to the current unit mode
        before being stored.
        """
        light = light_set.get_light(self._reg.name)
        if light is None:
            Machine._report_missing(self._reg.name)
        else:
            if self._reg.operand is Operand.MZ_LIGHT:
                if self._zone_check(light):
                    zone = self._reg.first_zone
                    color = light.get_color_zones(zone, zone + 1)[0]
                    self.color_to_reg(self._maybe_converted_color(color))
            else:
                color = light.get_color()
                self.color_to_reg(self._maybe_converted_color(color))
def _param(self) -> None:
"""
param instruction: the name of the parameter is in param0, and its
value is in param1. If the value is a Symbol or Register, it needs to
be dereferenced.
"""
inst = self.current_inst
value = inst.param1
if isinstance(value, Symbol):
value = self._call_stack.get_variable(value.name)
elif isinstance(value, Register):
value = self._reg.get_by_enum(value)
self._call_stack.put_param(inst.param0, value)
    def _jsr(self) -> None:
        """Jump to subroutine: record the return address, push a new frame,
        and set the pc to the routine's entry point."""
        inst = self.current_inst
        self._call_stack.set_return(self._reg.pc + 1)
        self._call_stack.push_current()
        routine_name = inst.param0
        # NOTE(review): an unknown routine name yields rtn = None and an
        # AttributeError on the next line — presumably the compiler
        # guarantees the routine exists; confirm.
        rtn = self._routines.get(routine_name, None)
        self._reg.pc = rtn.get_address()
    def _end(self) -> None:
        """Return from subroutine: unwind any open loops, pop the frame,
        and resume at the saved return address."""
        self._call_stack.unwind_loops()
        ret_addr = self._call_stack.get_return()
        self._call_stack.pop_current()
        self._reg.pc = ret_addr
    def _jump(self) -> None:
        # In the current instruction, param0 contains the condition, and
        # param1 contains the offset.
        inst = self.current_inst
        if inst.param0 is JumpCondition.INDIRECT:
            # Indirect jump: param1 names a variable holding the absolute
            # target address.
            address = self._call_stack.get_variable(inst.param1)
            self._reg.pc = address
        else:
            # Maps (condition, truthiness of the result register) to
            # whether the branch is taken.
            jump_if = {
                JumpCondition.ALWAYS: {True: True, False: True},
                JumpCondition.IF_FALSE: {True: False, False: True},
                JumpCondition.IF_TRUE: {True: True, False: False}
            }
            if jump_if[inst.param0][bool(self._reg.result)]:
                self._reg.pc += inst.param1  # relative offset
            else:
                self._reg.pc += 1  # fall through to next instruction
    def _loop(self) -> None:
        # Open a new loop scope on the current frame.
        self._call_stack.enter_loop()
    def _end_loop(self) -> None:
        # Close the innermost loop scope.
        self._call_stack.exit_loop()
    def _nop(self) -> None: pass
    def _push(self) -> None:
        # Stack-machine ops below delegate to the VmMath engine.
        self._vm_math.push(self.current_inst.param0)
    def _pushq(self) -> None:
        self._vm_math.pushq(self.current_inst.param0)
    def _pop(self) -> None:
        self._vm_math.pop(self.current_inst.param0)
    def _op(self) -> None:
        self._vm_math.op(self.current_inst.param0)
    def _bin_op(self, operator) -> None:
        """Apply a binary operator via VmMath; division by zero reports an
        error and halts the machine instead of propagating."""
        try:
            self._vm_math.bin_op(operator)
        except ZeroDivisionError:
            self._trigger_error("Division by zero. Halting execution.")
            self.stop()
    def _unary_op(self, operator) -> None:
        # Delegates to the VmMath engine.
        self._vm_math.unary_op(operator)
    def _disc(self) -> None:
        # Discovery ops delegate to the VmDiscover engine.
        self._vm_discover.disc()
    def _discm(self) -> None:
        self._vm_discover.discm(self.current_inst.param0)
    def _dnext(self) -> None:
        self._vm_discover.dnext(self.current_inst.param0)
    def _dnextm(self) -> None:
        self._vm_discover.dnextm(
            self.current_inst.param0, self.current_inst.param1)
    def _out(self) -> None:
        # Output op delegates to the VmIo engine.
        self._vm_io.out(self.current_inst)
def _pause(self) -> None:
if self._enable_pause:
print("Press any key to continue, q to quit, "
+ "! to run without stopping again.")
char = getch()
if char == 'q':
self.stop()
else:
print("Running...")
if char == '!':
self._enable_pause = False
    def _constant(self) -> None:
        """Define a named constant (param0 = name, param1 = value)."""
        name = self.current_inst.param0
        value = self.current_inst.param1
        self._call_stack.put_constant(name, value)
    def _wait(self) -> None:
        """Pause execution for the time register, or until a TimePattern
        matches."""
        time = self._reg.time
        if isinstance(time, TimePattern):
            self._clock.wait_until(time)
        elif time > 0:
            if self._reg.unit_mode is UnitMode.RAW:
                # Raw time is in ms (see Registers.time); the clock
                # apparently takes seconds.
                time /= 1000.0
            self._clock.pause_for(time)
    def _assure_raw_time(self, value) -> int:
        """Convert a time value to raw units when the current mode is
        LOGICAL or RGB; raw-mode values pass through unchanged."""
        if self._reg.unit_mode in (UnitMode.LOGICAL, UnitMode.RGB):
            return units.time_raw(value)
        return value
    def _assure_raw_color(self, color):
        """Convert a color from the current unit mode to raw values."""
        if self._reg.unit_mode is UnitMode.RAW:
            return color
        if self._reg.unit_mode is UnitMode.RGB:
            return units.rgb_to_raw(color)
        return units.logical_to_raw(color)
    def _maybe_converted_color(self, color):
        """
        The incoming color always consists of raw values; convert it to
        the current unit mode for storage in the registers.
        """
        if self._reg.unit_mode is UnitMode.RAW:
            return color
        if self._reg.unit_mode is UnitMode.LOGICAL:
            return units.raw_to_logical(color)
        return units.raw_to_rgb(color)
    def _move(self) -> None:
        """
        Move from variable/register to variable/register.

        param0 is the source: a Register, a variable name (str), or a
        LoopVar; param1 is the destination.
        """
        inst = self.current_inst
        value = inst.param0
        dest = inst.param1
        if isinstance(value, Register):
            value = self._reg.get_by_enum(value)
        elif isinstance(value, (str, LoopVar)):
            value = self._call_stack.get_variable(value)
        self._do_put_value(dest, value)
    def _moveq(self) -> None:
        """
        Move a value from the instruction itself into a register or variable.

        Writing to the unit-mode register is special-cased so the color and
        time registers get converted (see _switch_unit_mode).
        """
        value = self.current_inst.param0
        dest = self.current_inst.param1
        if dest is Register.UNIT_MODE:
            self._switch_unit_mode(value)
        else:
            self._do_put_value(dest, value)
@staticmethod
def _convert_units_fn(from_mode, to_mode):
def key(mode0, mode1): return str(mode0) + str(mode1)
converters = (
(UnitMode.LOGICAL, UnitMode.RAW, units.logical_to_raw),
(UnitMode.LOGICAL, UnitMode.RGB, units.logical_to_rgb),
(UnitMode.RGB, UnitMode.RAW, units.rgb_to_raw),
(UnitMode.RGB, UnitMode.LOGICAL, units.rgb_to_logical),
(UnitMode.RAW, UnitMode.RGB, units.raw_to_rgb),
(UnitMode.RAW, UnitMode.LOGICAL, units.raw_to_logical))
convert_dict = {key(from_mode, to_mode): fn
for from_mode, to_mode, fn in converters}
return convert_dict[key(from_mode, to_mode)]
    def _switch_unit_mode(self, to_mode) -> None:
        """Change the unit mode, converting the color registers, and the
        duration/time registers whenever RAW mode is entered or left."""
        from_mode = self._reg.unit_mode
        if from_mode is to_mode:
            return  # nothing to convert
        original_color = self._reg.get_color()
        self._reg.unit_mode = to_mode
        converter = self._convert_units_fn(from_mode, to_mode)
        self._reg.store_color(converter(original_color))
        # Time values are converted only when RAW is on one side of the
        # switch; LOGICAL <-> RGB leaves them untouched.
        if to_mode is UnitMode.RAW:
            self._reg.duration = units.time_raw(self._reg.duration)
            self._reg.time = units.time_raw(self._reg.time)
        elif from_mode is UnitMode.RAW:
            self._reg.duration = units.time_logical(self._reg.duration)
            self._reg.time = units.time_logical(self._reg.time)
    def _do_put_value(self, dest, value) -> None:
        # Registers are written directly; any other destination is a
        # call-stack variable.
        if isinstance(dest, Register):
            self._reg.set_by_enum(dest, value)
        else:
            self._call_stack.put_value(dest, value) if False else self._call_stack.put_variable(dest, value)
    def _time_pattern(self) -> None:
        """Initialize or extend the time register's TimePattern: SetOp.INIT
        replaces the register; otherwise param1 is union'ed into it."""
        inst = self.current_inst
        if inst.param0 == SetOp.INIT:
            self._reg.time = inst.param1
        else:
            self._reg.time.union(inst.param1)
def _zone_check(self, light) -> | |
# gh_stars: 0
# coding: utf8
import argparse
from clinicadl.tools.deep_learning.iotools import Parameters
from colorama import Fore
def _section_header(text):
    """Wrap an argparse section title in blue/reset terminal color codes."""
    return '{}{}{}'.format(Fore.BLUE, text, Fore.RESET)


TRAIN_CATEGORIES = {
    # General parent group
    'POSITIONAL': _section_header('Positional arguments'),
    'COMPUTATIONAL': _section_header('Computational resources'),
    'DATA': _section_header('Data management'),
    'CROSS-VALIDATION': _section_header('Cross-validation arguments'),
    'OPTIMIZATION': _section_header('Optimization parameters'),
    # Other parent groups
    'TRANSFER LEARNING': _section_header('Transfer learning'),
    'AUTOENCODER': _section_header('Autoencoder specific'),
    # Slice-level
    'SLICE': _section_header('Slice-level parameters'),
    # Patch arguments
    'PATCH': _section_header('Patch-level parameters'),
    'PATCH CNN': _section_header('Patch-level CNN parameters'),
    # ROI-based arguments
    'ROI': _section_header('ROI-based parameters'),
    'ROI CNN': _section_header('ROI-based CNN parameters'),
}
def set_default_dropout(args):
    """Fill in args.dropout when it is unset, based on the input mode.

    Defaults: 0.5 for 'image', 0.8 for 'slice', 0 for everything else.
    An explicitly provided dropout is left untouched.
    """
    if args.dropout is not None:
        return
    mode_defaults = {'image': 0.5, 'slice': 0.8}
    args.dropout = mode_defaults.get(args.mode, 0)
def preprocessing_t1w_func(args):
    """Build and run the t1-linear preprocessing workflow from CLI args."""
    from .preprocessing.T1_linear import preprocessing_t1w
    workflow = preprocessing_t1w(
        args.bids_dir, args.caps_dir, args.tsv_file, args.working_dir)
    workflow.run(plugin='MultiProc', plugin_args={'n_procs': args.nproc})
def extract_data_func(args):
    """Build and run the tensor-extraction workflow (image/patch/slice)."""
    from .preprocessing.T1_preparedl import extract_dl_t1w
    workflow = extract_dl_t1w(
        args.caps_dir, args.tsv_file, args.working_dir,
        args.extract_method, args.patch_size, args.stride_size,
        args.slice_direction, args.slice_mode)
    workflow.run(plugin='MultiProc', plugin_args={'n_procs': args.nproc})
def qc_func(args):
    """Run the t1-linear quality-check procedure from CLI args."""
    from clinicadl.quality_check.quality_check import quality_check
    quality_check(
        args.caps_dir, args.tsv_file, args.output_path,
        threshold=args.threshold, batch_size=args.batch_size,
        num_workers=args.nproc, gpu=not args.use_cpu)
def generate_data_func(args):
    """Generate a synthetic dataset, either 'random' (noise) or 'trivial'
    (mask-based atrophy)."""
    from .tools.data.generate_data import generate_random_dataset, generate_trivial_dataset
    common = dict(
        caps_dir=args.caps_dir,
        tsv_path=args.tsv_path,
        output_dir=args.output_dir,
        n_subjects=args.n_subjects,
        preprocessing=args.preprocessing)
    if args.mode == "random":
        generate_random_dataset(mean=args.mean, sigma=args.sigma, **common)
    else:
        generate_trivial_dataset(
            mask_path=args.mask_path,
            atrophy_percent=args.atrophy_percent,
            **common)
# Function to dispatch training to corresponding function
def _train_parameters(args):
    """Build the Parameters container common to every training branch."""
    return Parameters(
        args.mode,
        args.tsv_path,
        args.output_dir,
        args.caps_dir,
        args.preprocessing,
        args.model
    )


def _base_train_options(args):
    """Return the keyword options shared by every Parameters.write() call."""
    return dict(
        diagnoses=args.diagnoses,
        baseline=args.baseline,
        minmaxnormalization=not args.unnormalize,
        n_splits=args.n_splits,
        split=args.split,
        accumulation_steps=args.accumulation_steps,
        epochs=args.epochs,
        learning_rate=args.learning_rate,
        patience=args.patience,
        tolerance=args.tolerance,
        optimizer='Adam',
        weight_decay=args.weight_decay,
        gpu=not args.use_cpu,
        batch_size=args.batch_size,
        evaluation_steps=args.evaluation_steps,
        num_workers=args.nproc,
    )


def train_func(args):
    """Dispatch training according to the input mode and task.

    Refactoring: the seven near-identical Parameters construction /
    write() blocks of the original are collapsed into the shared helpers
    _train_parameters() and _base_train_options(); each branch now only
    adds its mode-specific options and picks the training entry point.
    Per-branch behavior (options written, function trained) is unchanged.
    """
    from .train import train_autoencoder, train_multi_cnn, train_single_cnn

    set_default_dropout(args)
    if args.mode == 'svm':
        raise NotImplementedError("The SVM commandline was not implement yet.")
    if args.mode not in ('image', 'slice', 'patch', 'roi'):
        print('Mode not detected in clinicadl')
        return

    params = _train_parameters(args)
    options = _base_train_options(args)
    train_fn = train_single_cnn  # default; overridden below where needed

    if args.mode == 'image':
        if args.mode_task == "autoencoder":
            options.update(visualization=args.visualization)
            train_fn = train_autoencoder
        else:
            options.update(
                dropout=args.dropout,
                transfer_learning_path=args.transfer_learning_path,
                transfer_learning_selection=args.transfer_learning_selection)
    elif args.mode == 'slice':
        options.update(
            mri_plane=args.slice_direction,
            dropout=args.dropout,
            selection_threshold=args.selection_threshold,
            prepare_dl=args.use_extracted_slices,
            discarded_slices=args.discarded_slices)
    elif args.mode == 'patch':
        options.update(
            patch_size=args.patch_size,
            stride_size=args.stride_size,
            hippocampus_roi=False)
        if args.mode_task == "autoencoder":
            options.update(
                visualization=args.visualization,
                prepare_dl=args.use_extracted_patches)
            train_fn = train_autoencoder
        else:
            options.update(
                dropout=args.dropout,
                transfer_learning_path=args.transfer_learning_path,
                transfer_learning_selection=args.transfer_learning_selection,
                selection_threshold=args.selection_threshold,
                prepare_dl=args.use_extracted_patches)
            if args.mode_task != "cnn":
                # Any task other than autoencoder/cnn trains one CNN per patch.
                train_fn = train_multi_cnn
    else:  # args.mode == 'roi'
        options.update(hippocampus_roi=True)
        if args.mode_task == "autoencoder":
            options.update(visualization=args.visualization)
            train_fn = train_autoencoder
        else:
            options.update(
                dropout=args.dropout,
                transfer_learning_path=args.transfer_learning_path,
                transfer_learning_selection=args.transfer_learning_selection,
                selection_threshold=args.selection_threshold)

    params.write(**options)
    train_fn(params)
# Function to dispatch command line options from classify to corresponding
# function
def classify_func(args):
    """Run inference on new data with a previously trained model."""
    from .classify.inference import classify
    classify(
        args.caps_directory, args.tsv_path, args.model_path,
        args.prefix_output, gpu=not args.use_cpu,
        prepare_dl=args.use_extracted_features)
# Functions to dispatch command line options from tsvtool to corresponding
# function
def tsv_restrict_func(args):
    """Apply the dataset-specific restriction (AIBL or OASIS).

    Any other dataset value is a no-op, as in the original if/elif chain.
    """
    from .tools.tsv.restriction import aibl_restriction, oasis_restriction
    restrictions = {"AIBL": aibl_restriction, "OASIS": oasis_restriction}
    restriction = restrictions.get(args.dataset)
    if restriction is not None:
        restriction(args.merged_tsv, args.results_path)
def tsv_getlabels_func(args):
    """Extract diagnosis labels from the merged TSV."""
    from .tools.tsv.data_formatting import get_labels
    get_labels(
        args.merged_tsv, args.missing_mods, args.results_path,
        diagnoses=args.diagnoses, modality=args.modality,
        restriction_path=args.restriction_path,
        time_horizon=args.time_horizon)
def tsv_split_func(args):
    """Split the diagnosis TSVs into training and test subsets."""
    from .tools.tsv.data_split import split_diagnoses
    split_diagnoses(
        args.merged_tsv, args.formatted_data_path,
        n_test=args.n_test, subset_name=args.subset_name,
        age_name=args.age_name,
        MCI_sub_categories=args.MCI_sub_categories,
        t_val_threshold=args.t_val_threshold,
        p_val_threshold=args.p_val_threshold)
def tsv_kfold_func(args):
    """Perform a k-fold split of the formatted data."""
    from .tools.tsv.kfold_split import split_diagnoses
    split_diagnoses(
        args.formatted_data_path, n_splits=args.n_splits,
        subset_name=args.subset_name,
        MCI_sub_categories=args.MCI_sub_categories)
def tsv_analysis_func(args):
    """Produce a demographics analysis of the formatted data."""
    from .tools.tsv.demographics_analysis import demographics_analysis
    demographics_analysis(
        args.merged_tsv, args.formatted_data_path, args.results_path,
        diagnoses=args.diagnoses, mmse_name=args.mmse_name,
        age_name=args.age_name, baseline=args.baseline)
def parse_command_line():
parser = argparse.ArgumentParser(
prog='clinicadl',
description='Deep learning software for neuroimaging datasets')
parser.add_argument('--verbose', '-v', action='count')
subparser = parser.add_subparsers(
title='''Task to execute with clinicadl:''',
description='''What kind of task do you want to use with clinicadl?
(tsvtool, preprocessing, extract, generate, train, validate, classify).''',
dest='task',
help='''****** Tasks proposed by clinicadl ******''')
subparser.required = True
# Generate synthetic data
generate_parser = subparser.add_parser(
'generate',
help='Generate synthetic data for functional tests.'
)
generate_parser.add_argument(
'mode',
help='Choose which dataset is generated (random, trivial).',
choices=['random', 'trivial'],
default='random'
)
generate_parser.add_argument(
'caps_dir',
help='Data using CAPS structure.',
default=None
)
generate_parser.add_argument(
'tsv_path',
help='TSV path with subjects/sessions to use for data generation.',
default=None
)
generate_parser.add_argument(
'output_dir',
help='Folder containing the synthetic dataset.',
default=None
)
generate_parser.add_argument(
'--n_subjects',
type=int,
default=300,
help="Number of subjects in each class of the synthetic dataset."
)
generate_parser.add_argument(
'--preprocessing',
type=str,
default='t1-linear',
choices=['t1-linear', 't1-extensive'],
help="Preprocessing used to generate synthetic data."
)
generate_parser.add_argument(
'--mean',
type=float,
default=0,
help="Mean value of the noise added for the random dataset."
)
generate_parser.add_argument(
'--sigma',
type=float,
default=0.5,
help="Standard deviation of the noise added for the random dataset."
)
generate_parser.add_argument(
'--mask_path',
type=str,
help='path to the extracted masks to generate the two labels.',
default=None
)
generate_parser.add_argument(
'--atrophy_percent',
type=float,
default=60,
help='percentage of atrophy applied'
)
generate_parser.set_defaults(func=generate_data_func)
# Preprocessing 1
# preprocessing_parser: get command line arguments and options for
# preprocessing
preprocessing_parser = subparser.add_parser(
'preprocessing',
help='Prepare data for training (needs clinica installed).')
preprocessing_parser.add_argument(
'bids_dir',
help='Data using BIDS structure.',
default=None
)
preprocessing_parser.add_argument(
'caps_dir',
help='Data using CAPS structure.',
default=None
)
preprocessing_parser.add_argument(
'tsv_file',
help='TSV file with subjects/sessions to process.',
default=None
)
preprocessing_parser.add_argument(
'working_dir',
help='Working directory to save temporary file.',
default=None
)
preprocessing_parser.add_argument(
'-np', '--nproc',
help='Number of cores used for processing (2 by default)',
type=int, default=2
)
preprocessing_parser.set_defaults(func=preprocessing_t1w_func)
# Preprocessing 2 - Extract data: slices or patches
# extract_parser: get command line argument and options
extract_parser = subparser.add_parser(
'extract',
help='Create data (slices or patches) for training.'
)
extract_parser.add_argument(
'caps_dir',
help='Data using CAPS structure.',
default=None
)
extract_parser.add_argument(
'tsv_file',
help='TSV file with subjects/sessions to process.',
default=None
)
extract_parser.add_argument(
'working_dir',
help='Working directory to save temporary file.',
default=None
)
extract_parser.add_argument(
'extract_method',
help='''Method used to extract features. Three options:
'image' to conver to PyTorch tensor the complete 3D image,
'patch' to extract 3D volumetric patches or
'slice' to get 2D slices from the image.''',
choices=['image', 'patch', 'slice'], default='image')
extract_parser.add_argument(
'-ps', '--patch_size',
help='''Patch size (only for 'patch' extraction) e.g: --patch_size 50''',
type=int, default=50)
extract_parser.add_argument(
'-ss', '--stride_size',
help='''Stride size (only for 'patch' extraction) e.g.: --stride_size 50''',
type=int, default=50)
extract_parser.add_argument(
'-sd', '--slice_direction',
help='''Slice direction (only for 'slice' extraction). Three options:
'0' -> Sagittal plane,
'1' -> Coronal plane or
'2' -> Axial plane''',
type=int, default=0)
extract_parser.add_argument(
'-sm', '--slice_mode',
help='''Slice mode (only for 'slice' extraction). Two options:
'single' to save the slice in one single channel,
'rgb' to save the slice in three identical channel.''',
choices=['single', 'rgb'], default='rgb')
extract_parser.add_argument(
'-np', '--nproc',
help='Number of cores used for processing',
type=int, default=2)
extract_parser.set_defaults(func=extract_data_func)
qc_parser = subparser.add_parser(
'quality_check',
help='Performs quality check procedure for t1-linear pipeline.'
'Original code can be found at https://github.com/vfonov/deep-qc'
)
qc_parser.add_argument("caps_dir",
help='Data using CAPS structure.',
type=str)
qc_parser.add_argument("tsv_file",
help='TSV path with subjects/sessions to process.',
type=str)
qc_parser.add_argument("output_path",
help="Path to the output tsv file (filename included).",
type=str)
qc_parser.add_argument("--threshold",
help='The threshold on the output probability to decide if the image passed or failed. '
'(default=0.5)',
type=float, default=0.5)
qc_parser.add_argument('--batch_size',
help='Batch size used in DataLoader (default=1).',
default=1, type=int)
qc_parser.add_argument("-np", "--nproc",
help='Number of cores used the quality check. (default=2)',
type=int, default=2)
qc_parser.add_argument('-cpu', '--use_cpu', action='store_true',
help='If provided, will use CPU instead of GPU.',
default=False)
qc_parser.set_defaults(func=qc_func)
# Train - Train CNN model with preprocessed data
# train_parser: get command line arguments and options
train_parser = subparser.add_parser(
'train',
help='Train with your data and create a model.')
train_subparser = train_parser.add_subparsers(
title='''Inputs types implemented in clinicadl''',
description='''What type of input do you want to use?
(image, patch, roi, slice).''',
dest='mode',
help='''****** Input types proposed by clinicadl ******''')
train_subparser.required | |
[(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)
# Sync Spark's columns as well.
sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])
if inplace:
self._metadata = metadata
self._sdf = sdf
else:
kdf = self.copy()
kdf._metadata = metadata
kdf._sdf = sdf
return kdf
def reset_index(self, level=None, drop=False, inplace=False):
"""For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
:param level: int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by default
:param drop: boolean, default False
Do not try to insert index into dataframe columns. This resets the index to the
default integer index.
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if len(self._metadata.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._metadata.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._metadata.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._metadata.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level unknown not found')
else:
raise KeyError('Level unknown must be same as name ({})'
.format(self._metadata.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._metadata.index_map.copy()
for i in idx:
info = self._metadata.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(index_name)))
index_map.remove(info)
if drop:
new_index_map = []
metadata = self._metadata.copy(
data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,
index_map=index_map)
columns = [name for _, name in new_index_map] + self._metadata.data_columns
if inplace:
self._metadata = metadata
self.columns = columns
else:
kdf = self.copy()
kdf._metadata = metadata
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, gets mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether it's
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
    def to_spark(self):
        """
        Return the current DataFrame as a Spark DataFrame.

        The internal Spark DataFrame is returned as-is (no copy, no column
        selection), so internal bookkeeping columns may be visible.

        See Also
        --------
        DataFrame.to_koalas
        """
        return self._sdf
    def to_pandas(self):
        """
        Return a Pandas DataFrame.

        .. note:: This method should only be used if the resulting Pandas DataFrame is expected
            to be small, as all the data is loaded into the driver's memory.

        Examples
        --------
        >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...                   columns=['dogs', 'cats'])
        >>> df.to_pandas()
           dogs  cats
        0   0.2   0.3
        1   0.0   0.6
        2   0.6   0.0
        3   0.2   0.1
        """
        # Select only the tracked columns (index + data) from the Spark frame.
        sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns])
        pdf = sdf.toPandas()
        if len(pdf) == 0 and len(sdf.schema) > 0:
            # Empty result: toPandas() loses dtypes, so restore them from the Spark schema.
            # TODO: push to OSS
            pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
                              for field in sdf.schema})
        # Rebuild the (possibly multi-level) index from the tracked index columns.
        index_columns = self._metadata.index_columns
        if len(index_columns) > 0:
            append = False
            for index_field in index_columns:
                # Keep the column in the data part too when it doubles as a data column.
                drop = index_field not in self._metadata.data_columns
                pdf = pdf.set_index(index_field, drop=drop, append=append)
                append = True  # subsequent levels are appended, building a MultiIndex
            pdf = pdf[self._metadata.data_columns]
        # Restore the original index names, if any were recorded.
        index_names = self._metadata.index_names
        if len(index_names) > 0:
            if isinstance(pdf.index, pd.MultiIndex):
                pdf.index.names = index_names
            else:
                pdf.index.name = index_names[0]
        return pdf

    # Alias to maintain backward compatibility with Spark
    toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, | |
kwargs.get("sim_mol_or_smiles", None)
sim_id = kwargs.get("sim_id", None)
query_fp = None
if not isinstance(props, list):
props = [props]
# make all props lower-case:
props = list(map(lambda x: x.lower(), props))
if sim_id is not None: # sim_id represents a Compound_Id,
# which is then taken as the Similarity base
sim_mol_or_smiles = self.show_cpd(sim_id, is_cpd_id=True,
make_copy=True, show_smiles=False)[0]
if sim_mol_or_smiles is not None:
if isinstance(sim_mol_or_smiles, str):
sim_mol_or_smiles = Chem.MolFromSmiles(sim_mol_or_smiles)
# use pre-calculated fingerprints whenever possible
if sim_mol_or_smiles.HasProp("FP_b64"):
query_fp = pickle.loads(base64.b64decode(sim_mol_or_smiles.GetProp("FP_b64")))
else:
murcko_mol = MurckoScaffold.GetScaffoldForMol(sim_mol_or_smiles)
if USE_FP == "morgan":
query_fp = Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect(murcko_mol, 2)
elif USE_FP == "avalon":
query_fp = pyAv.GetAvalonFP(murcko_mol, 1024)
else:
query_fp = FingerprintMols.FingerprintMol(murcko_mol)
ctr = 0
calculated_props = set()
for mol in self:
if not mol: continue
if "molid" in props:
ctr += 1
mol.SetProp("Mol_Id", str(ctr))
calculated_props.add("molid")
calc_props(mol, props, force2d=force2d, query_fp=query_fp,
calculated_props=calculated_props, **kwargs)
self._set_recalc_needed()
not_calculated = set(props) - calculated_props
if not_calculated:
print("* these props could not be calculated:", not_calculated)
def remove_props(self, props):
"""Remove properties from the Mol_List.
props can be a single property or a list of properties."""
for mol in self:
if mol:
remove_props_from_mol(mol, props)
self._set_recalc_needed()
    def remove_empty_props(self):
        """Remove properties that are empty across the whole list, then flag cached data stale."""
        # Calls the *module-level* helper of the same name (the method name does not
        # shadow it here, since methods are not in the function's global scope).
        remove_empty_props(self)
        self._set_recalc_needed()
def keep_props(self, props):
"""Keep properties in the Mol_List.
props can be a single property or a list of properties."""
if not isinstance(props, list):
props = [props]
for mol in self:
if mol:
keep_props_in_mol(mol, props)
self.order = props.copy()
self._set_recalc_needed()
def keep_largest_fragment(self):
"""Removes salts, etc.
Returns a new Mol_List instance. The original properties are copied over."""
frag_counter = 0
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
for mol in self:
mols = Chem.GetMolFrags(mol, asMols=True)
if len(mols) > 1:
frag_counter += 1
mols = sorted(mols, key=Desc.HeavyAtomCount, reverse=True)
new_mol = mols[0]
copy_mol_props(mol, new_mol)
else:
new_mol = deepcopy(mol)
new_list.append(new_mol)
print(" > small fragments were removed in {} molecules.".format(frag_counter))
return new_list
def copy_prop(self, prop_orig, prop_copy, move=False):
"""Copy or rename a property in the Mol_List."""
for mol in self.mols_with_prop(prop_orig):
val_orig = mol.GetProp(prop_orig)
mol.SetProp(prop_copy, val_orig)
if move:
mol.ClearProp(prop_orig)
self._set_recalc_needed()
    def rename_prop(self, prop_orig, prop_new):
        """Rename a property on all molecules.

        Convenience wrapper around ``copy_prop`` with ``move=True``."""
        self.copy_prop(prop_orig, prop_new, move=True)
def remove_dups_by_id(self, id_prop=None, make_copy=True):
"""Remove duplicate records by Compound Id.
Parameters:
id_prop (None, str): The name of the Id property, if *None*, it will be guessed.
Returns:
new Mol_list without the duplicate Ids.
By default it creates an independent copy of the mol objects."""
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
id_list = []
if not id_prop:
id_prop = guess_id_prop(list_fields(self))
if not id_prop:
print("* could not determine Id property.")
return None
for mol in self:
if not mol: continue
mol_id = mol.GetProp(id_prop)
if mol_id in id_list: continue
id_list.append(mol_id)
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
def remove_by_id(self, cpd_id, id_prop=None, make_copy=True):
"""Remove molecules records by Compound Id.
Parameters:
id_prop (None, str): The name of the Id property, if *None*, it will be guessed.
Returns:
new Mol_list without the duplicate Ids.
By default it creates an independent copy of the mol objects."""
if not isinstance(cpd_id, list):
cpd_id = [cpd_id]
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
if not id_prop:
id_prop = guess_id_prop(list_fields(self))
if not id_prop:
print("* could not determine Id property.")
return None
for mol in self:
if not mol: continue
mol_id = get_value(mol.GetProp(id_prop))
if mol_id in cpd_id: continue
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
def remove_dups_by_struct(self, make_copy=True):
"""Remove duplicates by structure. Duplicates are determined by Smiles.
Returns:
new Mol_List without the duplicate structures.
By default it creates an independent copy of the mol objects. """
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
smiles_list = []
for mol in self:
if not mol: continue
smiles = Chem.MolToSmiles(mol, isomericSmiles=True) # needed to distinguish between stereoisomers
if smiles in smiles_list: continue
smiles_list.append(smiles)
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
    def enum_racemates(self, find_only=True):
        """returns: result_sdf::list<mol>, racemic_molids::list<int>

        find_only==True: return new sdf as list which contains all the racemates of the input sdf.
        find_only==False: return new sdf as list with ALL input structures, where the racemates are
                          replaced by their enantiomers. The returned sdf is always
                          equal in size or larger as the input sdf.

        NOTE(review): the original doc claimed "Multiple stereo centers are not yet handled",
        but the code below enumerates all R/S combinations via itertools.product —
        confirm which statement is current.

        In the new sdf the molids are no longer unique and should be reassigned
        (remove molid and run calc_props(sdf))."""
        # RDKit chiral tags keyed by CIP label.
        chirality = {"R": Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
                     "S": Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW}
        prop_list = self.fields
        if self.id_prop is not None:
            if self.id_prop not in prop_list:
                raise LookupError("id_prop not found in data set.")
        else: # try to guess an id_prop
            self.id_prop = guess_id_prop(prop_list)
        result = self.new()
        racemic_molids = []
        for mol in self:
            # centers with "?" have unassigned (racemic) stereochemistry
            chiral_centers = Chem.FindMolChiralCenters(mol, includeUnassigned=True)
            undefined_centers = [center[0] for center in chiral_centers if center[1] == "?"]
            if undefined_centers:
                racemic_molids.append(get_value(mol.GetProp(self.id_prop)))
                if find_only:
                    result.append(mol)
                    continue
                else:
                    # enumerate every R/S combination over the undefined centers
                    num_stereocenters = len(undefined_centers)
                    stereocenters = product("RS", repeat=num_stereocenters)
                    for stereo in stereocenters:
                        new_mol = Chem.Mol(mol)  # copy, leave the original untouched
                        for idx, center in enumerate(undefined_centers):
                            new_mol.GetAtomWithIdx(center).SetChiralTag(chirality[stereo[idx]])
                        result.append(new_mol)
            else:
                if not find_only: # return ALL mols
                    result.append(mol)
        return result, racemic_molids
    def join_data_from_file(self, fn, id_prop=None, decimals=2):
        """Joins data from a file with name ``fn`` by Id property ``id_prop``. If no Id property is given, it will be guessed.

        CAUTION: The records from the file are loaded into memory!

        Parameters:
            decimals (int): number of decimal places for floating point values."""
        if not id_prop:
            id_prop = guess_id_prop(self.field_types)
        # NOTE(review): if guess_id_prop returns None, line.pop(None) below raises
        # KeyError — confirm whether an explicit error message is preferable.
        # Build a lookup {record_id: remaining_fields} from the file.
        file_d = {}
        for line in csv_supplier(fn):
            rec_id = get_value(line.pop(id_prop))
            file_d[rec_id] = line
        # Merge the file's fields into every molecule with a matching Id.
        for mol in self:
            mol_id = get_prop_val(mol, id_prop)
            if mol_id in file_d:
                records = file_d[mol_id]
                for rec in records:
                    val = get_value(records[rec])
                    if val is None: continue
                    if isinstance(val, float):
                        # floats are rounded to the requested number of decimals
                        mol.SetProp(rec, "{val:.{decimals}f}".format(val=val, decimals=decimals))
                    else:
                        mol.SetProp(rec, str(val))
        self._set_recalc_needed()
    def set_default(self, prop, def_val, condition=None):
        """Set a default value in all mols, in which ``prop`` is either not defined (``condition`` == None) or
        is evaluating ``condition`` to true.

        ``condition`` is a Python expression fragment applied to the current
        property value, e.g. ``"< 0"`` or ``"== 'n.d.'"``.

        WARNING: the condition is combined with the property value and passed to
        eval() — never call this with an untrusted ``condition`` string."""
        failed = 0
        if condition and not isinstance(condition, str):
            raise TypeError("condition needs to be of type str.")
        for mol in self:
            if not mol: continue
            if not condition:
                # no condition: only fill in the default where the prop is missing
                if not mol.HasProp(prop):
                    mol.SetProp(prop, str(def_val))
            else:
                if mol.HasProp(prop):
                    prop_val = get_value(mol.GetProp(prop))
                    # quote string values so the eval'd expression stays valid Python
                    if isinstance(prop_val, str):
                        eval_templ = """'{}' {}"""
                    else:
                        eval_templ = """{} {}"""
                    try:
                        if eval(eval_templ.format(prop_val, condition)):
                            mol.SetProp(prop, str(def_val))
                    except SyntaxError:
                        # NOTE(review): only SyntaxError is caught; a NameError or
                        # TypeError from an odd value would propagate — confirm intended.
                        failed += 1
        self.recalc_needed["d"] = True
        self.recalc_needed["field_types"] = True
        if failed > 0:
            print("# {} records could not be processed.".format(failed))
def table(self, pagesize=25, highlight=None, show_hidden=False, img_dir=None, raw=False):
"""Return the Mol_List as HTML table.
Either as raw HTML (raw==True) or as HTML object for display in IPython notebook.
Parameters:
show_hidden (bool): Whether to show hidden properties (name starts with _) or not.
Default is False.
raw (bool): If True, return the HTML mol grid as text.
If False, return a HTML object, that can be displayed in the Jupyter Notebook.
Default is False.
img_dir (str or None): The directory, in which the molecule images are written. The directory has to exist.
Implies raw=True. If None, then the images are stored in the HTML object. Default is None."""
if self.id_prop is None:
self.id_prop = guess_id_prop(list_fields(self))
if img_dir is not None:
raw = True
if raw:
return mol_table(self, id_prop=self.id_prop, highlight=highlight, interact=self.ia,
order=self.order, img_dir=img_dir, show_hidden=show_hidden)
else:
return table_pager(self, pagesize=pagesize, id_prop=self.id_prop, interact=self.ia, highlight=highlight, order=self.order,
show_hidden=show_hidden)
def nested(self, pagesize=10, props=None, img_dir=None, raw=False):
if self.id_prop is None:
self.id_prop = guess_id_prop(list_fields(self))
if img_dir is not None:
raw = True
if raw:
return nested_table(self, id_prop=self.id_prop, props=props, order=self.order, img_dir=img_dir)
else:
return nested_pager(self, pagesize=pagesize, id_prop=self.id_prop, props=props, order=self.order)
def grid(self, pagesize=12, props=None, highlight=None, mols_per_row=4, size=IMG_GRID_SIZE, img_dir=None, raw=False):
"""Returns:
The Mol_List as HTML grid table. Either as raw HTML (raw==True) or as HTML object for display in IPython notebook.
Parameters:
props: A property or a list of properties to include in the display.
raw (bool): If True, return the HTML mol grid as text.
If False, return a HTML object, that can be displayed in the Jupyter Notebook.
Default is False.
img_dir (str or None): The directory, in which the molecule images are written. The directory has to exist.
Implies raw=True. If None, then the images are stored in the HTML object. Default is None."""
if self.id_prop | |
"""
PREDSTORM real time solar wind forecasting from L1 solar wind data
predicting the L1 solar wind and Dst index with analogue ensembles
for similar algorithms see Riley et al. 2017, Owens et al. 2017
Author: <NAME>, IWF Graz, Austria
twitter @chrisoutofspace, https://github.com/IWF-helio
started April 2018, last update August 2019
python 3.7 with sunpy
method
semi-supervised learning: add known intervals of ICMEs, MFRs and CIRs in the training data
helcats lists for ICMEs at Wind since 2007
HSS e.g. https://link.springer.com/article/10.1007%2Fs11207-013-0355-z
https://en.wikipedia.org/wiki/Pattern_recognition
Things to do:
use recarrays!
DSCOVR data:
Nans for missing data should be handled better and interpolated over, OBrien stops with Nans
training data:
use stereo one hour data as training data set, corrected for 1 AU
use VEX and MESSENGER as tests for HelioRing like forecasts, use STEREO at L5 for training data of the last few days
forecast plot:
add approximate levels of Dst for each location to see aurora, taken from ovation prime/worldview and Dst
add Temerin and Li method and kick out Burton/OBrien; make error bars for Dst
take mean of ensemble forecast for final blue line forecast or only best match?
MIT LICENSE
Copyright 2018, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
##########################################################################################
####################################### CODE START #######################################
##########################################################################################
################################## INPUT PARAMETERS ######################################
import os
import sys
import getopt
# READ INPUT OPTIONS FROM COMMAND LINE
# Only two long options are recognized: --server (headless mode) and --help.
argv = sys.argv[1:]
opts, args = getopt.getopt(argv,"h",["server", "help"])
server = False
if "--server" in [o for o, v in opts]:
    server = True
    print("In server mode!")
# The matplotlib backend must be chosen BEFORE pyplot is imported below,
# which is why matplotlib is imported here rather than with the other imports.
import matplotlib
if server:
    matplotlib.use('Agg') # important for server version, otherwise error when making figures
else:
    matplotlib.use('Qt5Agg') # figures are shown on mac
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from matplotlib.dates import num2date, date2num, DateFormatter
import numpy as np
import time
import pickle
import copy
import pdb
import urllib
import json
import seaborn as sns
import scipy
from scipy import stats
import sunpy.time
import predstorm as ps
from predstorm_l1_input import *
#========================================================================================
#--------------------------------- FUNCTIONS --------------------------------------------
#========================================================================================
def get_dscovr_data_real_old():
    """
    Downloads and returns real-time DSCOVR solar wind data.

    Data from http://services.swpc.noaa.gov/products/solar-wind/
    if needed replace with ACE
    http://legacy-www.swpc.noaa.gov/ftpdir/lists/ace/
    get 3 or 7 day data
    url_plasma='http://services.swpc.noaa.gov/products/solar-wind/plasma-3-day.json'
    url_mag='http://services.swpc.noaa.gov/products/solar-wind/mag-3-day.json'

    Parameters
    ==========
    None

    Returns
    =======
    (data_minutes, data_hourly)
    data_minutes : np.rec.array
        Array of interpolated minute data with format:
        dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
        ('speed','f8'),('den','f8'),('temp','f8')]
    data_hourly : np.rec.array
        Array of interpolated hourly data with the same dtype.
    """
    url_plasma='http://services.swpc.noaa.gov/products/solar-wind/plasma-7-day.json'
    url_mag='http://services.swpc.noaa.gov/products/solar-wind/mag-7-day.json'
    #download, see URLLIB https://docs.python.org/3/howto/urllib2.html
    with urllib.request.urlopen(url_plasma) as url:
        pr = json.loads(url.read().decode())
    with urllib.request.urlopen(url_mag) as url:
        mr = json.loads(url.read().decode())
    logger.info('get_dscovr_data_real: DSCOVR plasma data available')
    logger.info(str(pr[0]))
    logger.info('get_dscovr_data_real: DSCOVR MAG data available')
    logger.info(str(mr[0]))
    #kill first row which stems from the description part
    pr=pr[1:]
    mr=mr[1:]
    #define variables
    #plasma
    rptime_str=['']*len(pr)
    rptime_num=np.zeros(len(pr))
    rpv=np.zeros(len(pr))
    rpn=np.zeros(len(pr))
    rpt=np.zeros(len(pr))
    #mag
    rbtime_str=['']*len(mr)
    rbtime_num=np.zeros(len(mr))
    rbtot=np.zeros(len(mr))
    rbzgsm=np.zeros(len(mr))
    rbygsm=np.zeros(len(mr))
    rbxgsm=np.zeros(len(mr))
    #convert variables to numpy arrays
    #mag
    for k in np.arange(0,len(mr),1):
        #handle missing data, they show up as None from the JSON data file
        if mr[k][6] is None: mr[k][6]=np.nan
        if mr[k][3] is None: mr[k][3]=np.nan
        if mr[k][2] is None: mr[k][2]=np.nan
        if mr[k][1] is None: mr[k][1]=np.nan
        rbtot[k]=float(mr[k][6])
        rbzgsm[k]=float(mr[k][3])
        rbygsm[k]=float(mr[k][2])
        rbxgsm[k]=float(mr[k][1])
        #convert time from string to datenumber
        rbtime_str[k]=mr[k][0][0:16]
        rbtime_num[k]=date2num(datetime.strptime(rbtime_str[k], "%Y-%m-%d %H:%M"))
    #plasma
    for k in np.arange(0,len(pr),1):
        if pr[k][2] is None: pr[k][2]=np.nan
        rpv[k]=float(pr[k][2]) #speed
        rptime_str[k]=pr[k][0][0:16]
        # BUG FIX: the plasma timestamps must be parsed from the plasma records
        # (rptime_str) — previously rbtime_str (magnetometer times) was used here,
        # corrupting the plasma time axis used for the interpolations below.
        rptime_num[k]=date2num(datetime.strptime(rptime_str[k], "%Y-%m-%d %H:%M"))
        if pr[k][1] is None: pr[k][1]=np.nan
        rpn[k]=float(pr[k][1]) #density
        if pr[k][3] is None: pr[k][3]=np.nan
        rpt[k]=float(pr[k][3]) #temperature
    #interpolate to minutes
    #rtimes_m=np.arange(rbtime_num[0],rbtime_num[-1],1.0000/(24*60))
    rtimes_m= round_to_hour(num2date(rbtime_num[0])) + np.arange(0,len(rbtime_num)) * timedelta(minutes=1)
    #convert back to matplotlib time
    rtimes_m=date2num(rtimes_m)
    rbtot_m=np.interp(rtimes_m,rbtime_num,rbtot)
    rbzgsm_m=np.interp(rtimes_m,rbtime_num,rbzgsm)
    rbygsm_m=np.interp(rtimes_m,rbtime_num,rbygsm)
    rbxgsm_m=np.interp(rtimes_m,rbtime_num,rbxgsm)
    rpv_m=np.interp(rtimes_m,rptime_num,rpv)
    rpn_m=np.interp(rtimes_m,rptime_num,rpn)
    rpt_m=np.interp(rtimes_m,rptime_num,rpt)
    #interpolate to hours
    #rtimes_h=np.arange(np.ceil(rbtime_num)[0],rbtime_num[-1],1.0000/24.0000)
    rtimes_h= round_to_hour(num2date(rbtime_num[0])) + np.arange(0,len(rbtime_num)/(60)) * timedelta(hours=1)
    rtimes_h=date2num(rtimes_h)
    rbtot_h=np.interp(rtimes_h,rbtime_num,rbtot)
    rbzgsm_h=np.interp(rtimes_h,rbtime_num,rbzgsm)
    rbygsm_h=np.interp(rtimes_h,rbtime_num,rbygsm)
    rbxgsm_h=np.interp(rtimes_h,rbtime_num,rbxgsm)
    rpv_h=np.interp(rtimes_h,rptime_num,rpv)
    rpn_h=np.interp(rtimes_h,rptime_num,rpn)
    rpt_h=np.interp(rtimes_h,rptime_num,rpt)
    #make recarrays
    data_hourly=np.rec.array([rtimes_h,rbtot_h,rbxgsm_h,rbygsm_h,rbzgsm_h,rpv_h,rpn_h,rpt_h], \
        dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
        ('speed','f8'),('den','f8'),('temp','f8')])
    data_minutes=np.rec.array([rtimes_m,rbtot_m,rbxgsm_m,rbygsm_m,rbzgsm_m,rpv_m,rpn_m,rpt_m], \
        dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
        ('speed','f8'),('den','f8'),('temp','f8')])
    return data_minutes, data_hourly
def get_omni_data_old():
    """Read the local OMNI2 hourly dataset into a structured numpy recarray.

    FORMAT(2I4,I3,I5,2I3,2I4,14F6.1,F9.0,F6.1,F6.0,2F6.1,F6.3,F6.2, F9.0,F6.1,F6.0,2F6.1,F6.3,2F7.2,F6.1,I3,I4,I6,I5,F10.2,5F9.2,I3,I4,2F6.1,2I6,F5.1)
    1963   1  0 1771 99 99 999 999 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 9999999. 999.9 9999. 999.9 999.9 9.999 99.99 9999999. 999.9 9999. 999.9 999.9 9.999 999.99 999.99 999.9  7  23    -6  119 999999.99 99999.99 99999.99 99999.99 99999.99 99999.99  0   3 999.9 999.9 99999 99999 99.9

    define variables from OMNI2 dataset
    see http://omniweb.gsfc.nasa.gov/html/ow_data.html
    omni2_url='ftp://nssdcftp.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2_all_years.dat'

    Returns
    =======
    omni_data : np.rec.array with fields time, btot, bx, by, bz, bygsm, bzgsm,
        speed, speedx, den, pdyn, dst, kp (known fill values converted to NaN).
    """
    #check how many rows exist in this file
    # BUG FIX: the file handle was previously opened without ever being closed.
    with open('data/omni2_all_years.dat') as f:
        dataset= len(f.readlines())
    #print(dataset)
    #global Variables
    spot=np.zeros(dataset)
    btot=np.zeros(dataset) #floating points
    bx=np.zeros(dataset) #floating points
    by=np.zeros(dataset) #floating points
    bz=np.zeros(dataset) #floating points
    bzgsm=np.zeros(dataset) #floating points
    bygsm=np.zeros(dataset) #floating points
    speed=np.zeros(dataset) #floating points
    speedx=np.zeros(dataset) #floating points
    speed_phi=np.zeros(dataset) #floating points
    speed_theta=np.zeros(dataset) #floating points
    dst=np.zeros(dataset) #float
    kp=np.zeros(dataset) #float
    den=np.zeros(dataset) #float
    pdyn=np.zeros(dataset) #float
    year=np.zeros(dataset)
    day=np.zeros(dataset)
    hour=np.zeros(dataset)
    t=np.zeros(dataset) #index time
    j=0
    print('Read OMNI2 data ...')
    with open('data/omni2_all_years.dat') as f:
        for line in f:
            line = line.split() # to deal with blank
            #print line #41 is Dst index, in nT
            dst[j]=line[40]
            kp[j]=line[38]
            # NOTE(review): kp fill values are not converted to NaN — confirm intended.
            if dst[j] == 99999: dst[j]=np.NaN
            #40 is sunspot number
            spot[j]=line[39]
            #if spot[j] == 999: spot[j]=NaN
            #25 is bulkspeed F6.0, in km/s
            speed[j]=line[24]
            if speed[j] == 9999: speed[j]=np.NaN
            #get speed angles F6.1
            speed_phi[j]=line[25]
            if speed_phi[j] == 999.9: speed_phi[j]=np.NaN
            speed_theta[j]=line[26]
            if speed_theta[j] == 999.9: speed_theta[j]=np.NaN
            #convert speed to GSE x see OMNI website footnote
            speedx[j] = - speed[j] * np.cos(np.radians(speed_theta[j])) * np.cos(np.radians(speed_phi[j]))
            #9 is total B F6.1 also fill ist 999.9, in nT
            btot[j]=line[9]
            if btot[j] == 999.9: btot[j]=np.NaN
            #GSE components from 13 to 15, so 12 to 14 index, in nT
            bx[j]=line[12]
            if bx[j] == 999.9: bx[j]=np.NaN
            by[j]=line[13]
            if by[j] == 999.9: by[j]=np.NaN
            bz[j]=line[14]
            if bz[j] == 999.9: bz[j]=np.NaN
            #GSM
            bygsm[j]=line[15]
            if bygsm[j] == 999.9: bygsm[j]=np.NaN
            bzgsm[j]=line[16]
            if bzgsm[j] == 999.9: bzgsm[j]=np.NaN
            #24 in file, index 23 proton density /ccm
            den[j]=line[23]
            if den[j] == 999.9: den[j]=np.NaN
            #29 in file, index 28 Pdyn, F6.2, fill values sind 99.99, in nPa
            pdyn[j]=line[28]
            if pdyn[j] == 99.99: pdyn[j]=np.NaN
            year[j]=line[0]
            day[j]=line[1]
            hour[j]=line[2]
            j=j+1
    #convert time to matplotlib format
    #http://matplotlib.org/examples/pylab_examples/date_demo2.html
    times1=np.zeros(len(year)) #datetime time
    print('convert time start')
    for index in range(0,len(year)):
        #first to datetimeobject
        timedum=datetime(int(year[index]), 1, 1) + timedelta(day[index] - 1) +timedelta(hours=hour[index])
        #then to matlibplot dateformat:
        times1[index] = date2num(timedum)
    print('convert time done') #for time conversion
    print('all done.')
    print(j, ' datapoints') #for reading data from OMNI file
    #make structured array of data
    omni_data=np.rec.array([times1,btot,bx,by,bz,bygsm,bzgsm,speed,speedx,den,pdyn,dst,kp], \
        dtype=[('time','f8'),('btot','f8'),('bx','f8'),('by','f8'),('bz','f8'),\
        ('bygsm','f8'),('bzgsm','f8'),('speed','f8'),('speedx','f8'),('den','f8'),('pdyn','f8'),('dst','f8'),('kp','f8')])
    return omni_data
def round_to_hour(dt):
    '''
    Round a datetime object to the nearest full hour.

    Times at or after half past round up; everything earlier rounds down.
    '''
    floor = dt.replace(minute=0, second=0, microsecond=0)
    if dt.minute >= 30:
        # at or past the half hour: round up to the next hour
        return floor + timedelta(hours=1)
    # before the half hour: round down
    return floor
#========================================================================================
#--------------------------------- MAIN PROGRAM -----------------------------------------
#========================================================================================
# Close any stale figures from a previous run and print the startup banner.
plt.close('all')
print()
print()
print('------------------------------------------------------------------------')
print()
print('PREDSTORM L1 v1 method for geomagnetic storm and aurora forecasting. ')
print('<NAME>, IWF Graz, last update August 2019.')
print()
print('Based on results by Riley et al. 2017 Space Weather, and')
print('Owens, Riley and Horbury 2017 Solar Physics. ')
print()
print('This is a pattern recognition technique that searches ')
print('for similar intervals in historic data as the current solar wind - also known as Analogue Ensembles (AnEn).')
print()
print('This is the real time version by <NAME>, IWF Graz, Austria. Last update: April 2019. ')
print()
print('------------------------------------------------------------------------')
logger = ps.init_logging()

# Ensure the output directories exist. os.makedirs(..., exist_ok=True) is
# idempotent and race-free, replacing the former `isdir(...) == False` checks.
os.makedirs('real', exist_ok=True)
os.makedirs('data', exist_ok=True)
#================================== (1) GET DATA ========================================

######################### (1a) get real time DSCOVR data ##################################
logger.info("Loading real-time DSCOVR data...")
dscovr = ps.get_dscovr_realtime_data()
# get time of the last entry in the DSCOVR data
# (times here are matplotlib datenums, i.e. fractional days)
timenow = dscovr['time'][-1]
timenowstr = num2date(timenow).strftime("%Y-%m-%d %H:%M")
# get UTC time now
timestamp = datetime.utcnow()
timeutc = date2num(timestamp)
timeutcstr = timestamp.strftime("%Y-%m-%d %H:%M")
print()
print()
print('Current time UTC')
print(timeutcstr)
print('UTC Time of last datapoint in real time DSCOVR data')
print(timenowstr)
# difference in days * 24 * 60 gives the data lag in minutes
print('Time lag in minutes:', int(round((timeutc-timenow)*24*60)))
print()
logger.info('Load real time Dst from Kyoto via NOAA')
dst = ps.get_noaa_dst()
logger.info("Loading OMNI2 dataset...")
# Download + cache the OMNI2 dataset on first run only.
if not os.path.exists('data/omni2_all_years.dat'):
    omni = ps.get_omni_data(download=True)
    # NOTE(review): the file handle from open() is never closed — consider a with-block.
    pickle.dump(omni, open('data/omni2_all_years_pickle.p', 'wb') )
#see http://omniweb.gsfc.nasa.gov/html/ow_data.html
| |
^( INITSTATE_ ID )
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(INITSTATE_, "INITSTATE_")
, root_1)
self._adaptor.addChild(root_1,
stream_ID.nextNode()
)
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "state_decl"
class data_decl_return(ParserRuleReturnScope):
    """Rule-return scope for ``data_decl``; carries the rewritten AST."""

    def __init__(self):
        # Base scope keeps the start/stop token bookkeeping.
        ParserRuleReturnScope.__init__(self)
        # No AST until the rule's rewrite step assigns one.
        self.tree = None
# $ANTLR start "data_decl"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:179:5: data_decl : DATA ID SEMICOLON -> ^( DATA_ ID ) ;
def data_decl(self, ):
    """Parse the grammar rule ``data_decl : DATA ID SEMICOLON`` and rewrite
    the match into the AST ``^( DATA_ ID )``.

    Returns:
        data_decl_return: rule-return scope whose ``tree`` attribute holds
        the rewritten AST (or an error node after a recognition failure).
    """
    retval = self.data_decl_return()
    retval.start = self.input.LT(1)

    root_0 = None

    # Matched tokens and their (unused here) per-token tree slots,
    # exactly as ANTLR emits them.
    DATA32 = None
    ID33 = None
    SEMICOLON34 = None

    DATA32_tree = None
    ID33_tree = None
    SEMICOLON34_tree = None
    # Rewrite streams buffer matched tokens for the "->" rewrite below.
    stream_DATA = RewriteRuleTokenStream(self._adaptor, "token DATA")
    stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
    stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")

    try:
        try:
            # 179:17: DATA ID SEMICOLON
            pass
            DATA32 = self.match(self.input, DATA, self.FOLLOW_DATA_in_data_decl1093)
            stream_DATA.add(DATA32)

            ID33 = self.match(self.input, ID, self.FOLLOW_ID_in_data_decl1095)
            stream_ID.add(ID33)

            SEMICOLON34 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_data_decl1097)
            stream_SEMICOLON.add(SEMICOLON34)

            # AST Rewrite
            # elements: ID
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

            root_0 = self._adaptor.nil()
            # 179:35: -> ^( DATA_ ID )
            # Build ^( DATA_ ID ): imaginary DATA_ node as root, ID as child.
            root_1 = self._adaptor.nil()
            root_1 = self._adaptor.becomeRoot(
                self._adaptor.createFromType(DATA_, "DATA_")
                , root_1)

            self._adaptor.addChild(root_1,
                stream_ID.nextNode()
            )

            self._adaptor.addChild(root_0, root_1)

            retval.tree = root_0

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException as re:
            # NOTE: 're' is the caught exception here and shadows any stdlib
            # 're' module import (standard ANTLR-generated pattern).
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

    finally:
        pass
    return retval

# $ANTLR end "data_decl"
class id_decl_return(ParserRuleReturnScope):
    """Rule-return scope for ``id_decl``; carries the rewritten AST."""

    def __init__(self):
        # Base scope keeps the start/stop token bookkeeping.
        ParserRuleReturnScope.__init__(self)
        # No AST until the rule's rewrite step assigns one.
        self.tree = None
# $ANTLR start "id_decl"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:180:5: id_decl : ( set_decl )* NID ID SEMICOLON -> ^( ID_ ( set_decl )* ID ) ;
def id_decl(self, ):
    """Parse ``id_decl : ( set_decl )* NID ID SEMICOLON`` and rewrite the
    match into the AST ``^( ID_ ( set_decl )* ID )``.

    Returns:
        id_decl_return: rule-return scope whose ``tree`` attribute holds
        the rewritten AST (or an error node after a recognition failure).
    """
    retval = self.id_decl_return()
    retval.start = self.input.LT(1)

    root_0 = None

    # Matched tokens / subrules and their (unused here) per-token tree slots.
    NID36 = None
    ID37 = None
    SEMICOLON38 = None
    set_decl35 = None

    NID36_tree = None
    ID37_tree = None
    SEMICOLON38_tree = None
    # Rewrite streams buffer matched tokens/subtrees for the "->" rewrite below.
    stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
    stream_NID = RewriteRuleTokenStream(self._adaptor, "token NID")
    stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
    stream_set_decl = RewriteRuleSubtreeStream(self._adaptor, "rule set_decl")

    try:
        try:
            # 180:15: ( set_decl )* NID ID SEMICOLON
            pass
            # ( set_decl )* : keep consuming set_decl subrules while the
            # lookahead token is SET (one-token LL decision).
            while True: #loop5
                alt5 = 2
                LA5_0 = self.input.LA(1)

                if (LA5_0 == SET) :
                    alt5 = 1

                if alt5 == 1:
                    # 180:15: set_decl
                    pass
                    self._state.following.append(self.FOLLOW_set_decl_in_id_decl1116)
                    set_decl35 = self.set_decl()

                    self._state.following.pop()
                    stream_set_decl.add(set_decl35.tree)

                else:
                    break #loop5

            NID36 = self.match(self.input, NID, self.FOLLOW_NID_in_id_decl1119)
            stream_NID.add(NID36)

            ID37 = self.match(self.input, ID, self.FOLLOW_ID_in_id_decl1121)
            stream_ID.add(ID37)

            SEMICOLON38 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_id_decl1123)
            stream_SEMICOLON.add(SEMICOLON38)

            # AST Rewrite
            # elements: set_decl, ID
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

            root_0 = self._adaptor.nil()
            # 180:42: -> ^( ID_ ( set_decl )* ID )
            # Build ^( ID_ ... ): imaginary ID_ node as root, then every
            # buffered set_decl subtree, then the ID token as children.
            root_1 = self._adaptor.nil()
            root_1 = self._adaptor.becomeRoot(
                self._adaptor.createFromType(ID_, "ID_")
                , root_1)

            # 180:51: ( set_decl )*
            while stream_set_decl.hasNext():
                self._adaptor.addChild(root_1, stream_set_decl.nextTree())

            stream_set_decl.reset();

            self._adaptor.addChild(root_1,
                stream_ID.nextNode()
            )

            self._adaptor.addChild(root_0, root_1)

            retval.tree = root_0

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException as re:
            # NOTE: 're' is the caught exception here and shadows any stdlib
            # 're' module import (standard ANTLR-generated pattern).
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

    finally:
        pass
    return retval

# $ANTLR end "id_decl"
class set_decl_return(ParserRuleReturnScope):
    """Rule-return scope for ``set_decl``; carries the rewritten AST."""

    def __init__(self):
        # Base scope keeps the start/stop token bookkeeping.
        ParserRuleReturnScope.__init__(self)
        # No AST until the rule's rewrite step assigns one.
        self.tree = None
# $ANTLR start "set_decl"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:181:5: set_decl : SET OEBRACE val_range CEBRACE -> ^( SET_ val_range ) ;
def set_decl(self, ):
    """Parse ``set_decl : SET OEBRACE val_range CEBRACE`` and rewrite the
    match into the AST ``^( SET_ val_range )``.

    Returns:
        set_decl_return: rule-return scope whose ``tree`` attribute holds
        the rewritten AST (or an error node after a recognition failure).
    """
    retval = self.set_decl_return()
    retval.start = self.input.LT(1)

    root_0 = None

    # Matched tokens / subrule and their (unused here) per-token tree slots.
    SET39 = None
    OEBRACE40 = None
    CEBRACE42 = None
    val_range41 = None

    SET39_tree = None
    OEBRACE40_tree = None
    CEBRACE42_tree = None
    # Rewrite streams buffer matched tokens/subtrees for the "->" rewrite below.
    stream_SET = RewriteRuleTokenStream(self._adaptor, "token SET")
    stream_CEBRACE = RewriteRuleTokenStream(self._adaptor, "token CEBRACE")
    stream_OEBRACE = RewriteRuleTokenStream(self._adaptor, "token OEBRACE")
    stream_val_range = RewriteRuleSubtreeStream(self._adaptor, "rule val_range")

    try:
        try:
            # 181:16: SET OEBRACE val_range CEBRACE
            pass
            SET39 = self.match(self.input, SET, self.FOLLOW_SET_in_set_decl1145)
            stream_SET.add(SET39)

            OEBRACE40 = self.match(self.input, OEBRACE, self.FOLLOW_OEBRACE_in_set_decl1147)
            stream_OEBRACE.add(OEBRACE40)

            # Subrule call: push/pop the follow set around val_range().
            self._state.following.append(self.FOLLOW_val_range_in_set_decl1149)
            val_range41 = self.val_range()

            self._state.following.pop()
            stream_val_range.add(val_range41.tree)

            CEBRACE42 = self.match(self.input, CEBRACE, self.FOLLOW_CEBRACE_in_set_decl1151)
            stream_CEBRACE.add(CEBRACE42)

            # AST Rewrite
            # elements: val_range
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

            root_0 = self._adaptor.nil()
            # 181:46: -> ^( SET_ val_range )
            # Build ^( SET_ val_range ): imaginary SET_ root, subtree as child.
            root_1 = self._adaptor.nil()
            root_1 = self._adaptor.becomeRoot(
                self._adaptor.createFromType(SET_, "SET_")
                , root_1)

            self._adaptor.addChild(root_1, stream_val_range.nextTree())

            self._adaptor.addChild(root_0, root_1)

            retval.tree = root_0

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException as re:
            # NOTE: 're' is the caught exception here and shadows any stdlib
            # 're' module import (standard ANTLR-generated pattern).
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

    finally:
        pass
    return retval

# $ANTLR end "set_decl"
class objset_decl_return(ParserRuleReturnScope):
    """Rule-return scope for ``objset_decl``; carries the rewritten AST."""

    def __init__(self):
        # Base scope keeps the start/stop token bookkeeping.
        ParserRuleReturnScope.__init__(self)
        # No AST until the rule's rewrite step assigns one.
        self.tree = None
# $ANTLR start "objset_decl"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:182:5: objset_decl : SET OEBRACE val_range CEBRACE -> ^( OBJSET_ val_range ) ;
def objset_decl(self, ):
    """Parse ``objset_decl : SET OEBRACE val_range CEBRACE`` and rewrite the
    match into the AST ``^( OBJSET_ val_range )``.

    Same concrete syntax as ``set_decl``; only the imaginary root node of
    the rewritten AST differs (OBJSET_ instead of SET_).

    Returns:
        objset_decl_return: rule-return scope whose ``tree`` attribute holds
        the rewritten AST (or an error node after a recognition failure).
    """
    retval = self.objset_decl_return()
    retval.start = self.input.LT(1)

    root_0 = None

    # Matched tokens / subrule and their (unused here) per-token tree slots.
    SET43 = None
    OEBRACE44 = None
    CEBRACE46 = None
    val_range45 = None

    SET43_tree = None
    OEBRACE44_tree = None
    CEBRACE46_tree = None
    # Rewrite streams buffer matched tokens/subtrees for the "->" rewrite below.
    stream_SET = RewriteRuleTokenStream(self._adaptor, "token SET")
    stream_CEBRACE = RewriteRuleTokenStream(self._adaptor, "token CEBRACE")
    stream_OEBRACE = RewriteRuleTokenStream(self._adaptor, "token OEBRACE")
    stream_val_range = RewriteRuleSubtreeStream(self._adaptor, "rule val_range")

    try:
        try:
            # 182:19: SET OEBRACE val_range CEBRACE
            pass
            SET43 = self.match(self.input, SET, self.FOLLOW_SET_in_objset_decl1170)
            stream_SET.add(SET43)

            OEBRACE44 = self.match(self.input, OEBRACE, self.FOLLOW_OEBRACE_in_objset_decl1172)
            stream_OEBRACE.add(OEBRACE44)

            # Subrule call: push/pop the follow set around val_range().
            self._state.following.append(self.FOLLOW_val_range_in_objset_decl1174)
            val_range45 = self.val_range()

            self._state.following.pop()
            stream_val_range.add(val_range45.tree)

            CEBRACE46 = self.match(self.input, CEBRACE, self.FOLLOW_CEBRACE_in_objset_decl1176)
            stream_CEBRACE.add(CEBRACE46)

            # AST Rewrite
            # elements: val_range
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

            root_0 = self._adaptor.nil()
            # 182:49: -> ^( OBJSET_ val_range )
            # Build ^( OBJSET_ val_range ): imaginary OBJSET_ root, subtree child.
            root_1 = self._adaptor.nil()
            root_1 = self._adaptor.becomeRoot(
                self._adaptor.createFromType(OBJSET_, "OBJSET_")
                , root_1)

            self._adaptor.addChild(root_1, stream_val_range.nextTree())

            self._adaptor.addChild(root_0, root_1)

            retval.tree = root_0

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException as re:
            # NOTE: 're' is the caught exception here and shadows any stdlib
            # 're' module import (standard ANTLR-generated pattern).
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

    finally:
        pass
    return retval

# $ANTLR end "objset_decl"
class range_return(ParserRuleReturnScope):
    """Rule-return scope for ``range``; carries the rewritten AST."""

    def __init__(self):
        # Base scope keeps the start/stop token bookkeeping.
        ParserRuleReturnScope.__init__(self)
        # No AST until the rule's rewrite step assigns one.
        self.tree = None
# $ANTLR start "range"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:184:9: range : OEBRACE val_range DOT DOT val_range CEBRACE -> ^( RANGE_ OEBRACE val_range DOT DOT val_range CEBRACE ) ;
def range(self, ):
retval = self.range_return()
retval.start = self.input.LT(1)
root_0 = None
OEBRACE47 = None
DOT49 = None
DOT50 = None
CEBRACE52 = None
val_range48 = None
val_range51 = None
OEBRACE47_tree = None
DOT49_tree = None
DOT50_tree = None
CEBRACE52_tree = None
stream_DOT = RewriteRuleTokenStream(self._adaptor, "token DOT")
stream_CEBRACE = RewriteRuleTokenStream(self._adaptor, "token CEBRACE")
stream_OEBRACE = RewriteRuleTokenStream(self._adaptor, "token OEBRACE")
stream_val_range = RewriteRuleSubtreeStream(self._adaptor, "rule val_range")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:184:15: ( OEBRACE val_range DOT DOT val_range CEBRACE -> ^( RANGE_ OEBRACE val_range DOT DOT val_range CEBRACE ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:184:17: OEBRACE val_range DOT DOT val_range CEBRACE
pass
OEBRACE47 = self.match(self.input, OEBRACE, self.FOLLOW_OEBRACE_in_range1200)
stream_OEBRACE.add(OEBRACE47)
self._state.following.append(self.FOLLOW_val_range_in_range1202)
val_range48 = self.val_range()
self._state.following.pop()
stream_val_range.add(val_range48.tree)
DOT49 = self.match(self.input, DOT, self.FOLLOW_DOT_in_range1204)
stream_DOT.add(DOT49)
DOT50 = self.match(self.input, DOT, self.FOLLOW_DOT_in_range1206)
stream_DOT.add(DOT50)
self._state.following.append(self.FOLLOW_val_range_in_range1208)
val_range51 = self.val_range()
self._state.following.pop()
stream_val_range.add(val_range51.tree)
CEBRACE52 = self.match(self.input, CEBRACE, self.FOLLOW_CEBRACE_in_range1210)
stream_CEBRACE.add(CEBRACE52)
# AST Rewrite
# elements: OEBRACE, val_range, DOT, DOT, val_range, CEBRACE
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", | |
<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_win.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1075, 800)
MainWindow.setMinimumSize(QtCore.QSize(1075, 800))
MainWindow.setMaximumSize(QtCore.QSize(1075, 800))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(197, 255, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(163, 215, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 87, 68))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(87, 117, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 215, 195))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(197, 255, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(163, 215, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 87, 68))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(87, 117, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 215, 195))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 87, 68))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(197, 255, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(163, 215, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 87, 68))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(87, 117, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 87, 68))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 87, 68))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(130, 175, 136))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
MainWindow.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(11)
MainWindow.setFont(font)
MainWindow.setDocumentMode(False)
MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.graphicsView.setGeometry(QtCore.QRect(290, 18, 771, 741))
self.graphicsView.setMinimumSize(QtCore.QSize(700, 550))
self.graphicsView.setMaximumSize(QtCore.QSize(10000, 10000))
self.graphicsView.setObjectName("graphicsView")
self.drawButton = QtWidgets.QPushButton(self.centralwidget)
self.drawButton.setGeometry(QtCore.QRect(10, 640, 251, 61))
self.drawButton.setObjectName("drawButton")
self.clearCanvas = QtWidgets.QPushButton(self.centralwidget)
self.clearCanvas.setGeometry(QtCore.QRect(10, 710, 251, 61))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
font.setStrikeOut(False)
font.setStyleStrategy(QtGui.QFont.PreferDefault)
self.clearCanvas.setFont(font)
self.clearCanvas.setAcceptDrops(False)
self.clearCanvas.setObjectName("clearCanvas")
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setGeometry(QtCore.QRect(260, -1, 21, 851))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(144, 243, 144))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 202, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 81, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(64, 108, 64))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(175, 208, 175))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(144, 243, 144))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 202, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 81, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(64, 108, 64))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(175, 208, 175))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 81, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(144, 243, 144))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 202, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 81, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(64, 108, 64))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 81, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 81, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 162, 96))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.line_4.setPalette(palette)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(0, 620, 271, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_6 = QtWidgets.QFrame(self.centralwidget)
self.line_6.setGeometry(QtCore.QRect(0, 150, 271, 20))
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 251, 141))
self.label.setObjectName("label")
self.func_switch = QtWidgets.QComboBox(self.centralwidget)
self.func_switch.setGeometry(QtCore.QRect(50, 170, 211, 31))
self.func_switch.setObjectName("func_switch")
self.func_switch.addItem("")
self.func_switch.addItem("")
self.func_switch.addItem("")
self.func_switch.addItem("")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(10, 210, 251, 121))
self.groupBox.setObjectName("groupBox")
self.x_step = QtWidgets.QSlider(self.groupBox)
self.x_step.setGeometry(QtCore.QRect(10, 80, 231, 31))
self.x_step.setMaximum(50)
self.x_step.setProperty("value", 11)
self.x_step.setOrientation(QtCore.Qt.Horizontal)
self.x_step.setInvertedAppearance(False)
self.x_step.setInvertedControls(False)
self.x_step.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.x_step.setTickInterval(10)
self.x_step.setObjectName("x_step")
self.x_begin = QtWidgets.QLineEdit(self.groupBox)
self.x_begin.setGeometry(QtCore.QRect(10, 30, 111, 31))
self.x_begin.setObjectName("x_begin")
self.x_end = QtWidgets.QLineEdit(self.groupBox)
self.x_end.setGeometry(QtCore.QRect(130, 30, 111, 31))
self.x_end.setObjectName("x_end")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setGeometry(QtCore.QRect(10, 340, 251, 121))
self.groupBox_2.setObjectName("groupBox_2")
self.z_step = QtWidgets.QSlider(self.groupBox_2)
self.z_step.setGeometry(QtCore.QRect(10, 80, 231, 31))
self.z_step.setMaximum(50)
self.z_step.setProperty("value", 11)
self.z_step.setOrientation(QtCore.Qt.Horizontal)
self.z_step.setInvertedAppearance(False)
self.z_step.setInvertedControls(False)
self.z_step.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.z_step.setTickInterval(10)
self.z_step.setObjectName("z_step")
self.z_begin = QtWidgets.QLineEdit(self.groupBox_2)
self.z_begin.setGeometry(QtCore.QRect(10, 30, 111, 31))
self.z_begin.setObjectName("z_begin")
self.z_end = QtWidgets.QLineEdit(self.groupBox_2)
self.z_end.setGeometry(QtCore.QRect(130, 30, 111, 31))
self.z_end.setObjectName("z_end")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 170, 35, 31))
self.label_2.setObjectName("label_2")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setGeometry(QtCore.QRect(10, 470, 251, 151))
self.groupBox_3.setObjectName("groupBox_3")
self.label_3 = QtWidgets.QLabel(self.groupBox_3)
self.label_3.setGeometry(QtCore.QRect(10, 31, 41, 31))
self.label_3.setObjectName("label_3")
self.ox_rotate = QtWidgets.QSlider(self.groupBox_3)
self.ox_rotate.setGeometry(QtCore.QRect(59, 31, 181, 31))
self.ox_rotate.setMinimum(-180)
self.ox_rotate.setMaximum(180)
self.ox_rotate.setOrientation(QtCore.Qt.Horizontal)
self.ox_rotate.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.ox_rotate.setObjectName("ox_rotate")
self.oy_rotate = QtWidgets.QSlider(self.groupBox_3)
self.oy_rotate.setGeometry(QtCore.QRect(59, 71, 181, 31))
self.oy_rotate.setMinimum(-180)
self.oy_rotate.setMaximum(180)
self.oy_rotate.setOrientation(QtCore.Qt.Horizontal)
self.oy_rotate.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.oy_rotate.setObjectName("oy_rotate")
self.label_4 = QtWidgets.QLabel(self.groupBox_3)
self.label_4.setGeometry(QtCore.QRect(10, 71, 41, 31))
self.label_4.setObjectName("label_4")
self.oz_rotate = QtWidgets.QSlider(self.groupBox_3)
self.oz_rotate.setGeometry(QtCore.QRect(59, 110, 181, 31))
self.oz_rotate.setMinimum(-180)
self.oz_rotate.setMaximum(180)
self.oz_rotate.setOrientation(QtCore.Qt.Horizontal)
self.oz_rotate.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.oz_rotate.setObjectName("oz_rotate")
self.label_5 = QtWidgets.QLabel(self.groupBox_3)
self.label_5.setGeometry(QtCore.QRect(10, 111, 41, 31))
self.label_5.setObjectName("label_5")
self.graphicsView.raise_()
self.drawButton.raise_()
self.clearCanvas.raise_()
self.line.raise_()
self.line_6.raise_()
self.line_4.raise_()
self.label.raise_()
self.func_switch.raise_()
self.groupBox.raise_()
self.groupBox_2.raise_()
self.label_2.raise_()
self.groupBox_3.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1075, 18))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.graphicsView, self.drawButton)
MainWindow.setTabOrder(self.drawButton, self.clearCanvas)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Построение трёхмерных поверхностей"))
self.drawButton.setText(_translate("MainWindow", "Построить поверхность"))
self.clearCanvas.setText(_translate("MainWindow", "Очистить экран"))
self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:16pt;\">Алгоритм </span></p><p align=\"center\"><span style=\" font-size:16pt;\">Плавающего</span></p><p align=\"center\"><span style=\" font-size:16pt;\">горизонта</span></p></body></html>"))
self.func_switch.setItemText(0, _translate("MainWindow", "x² + z²"))
self.func_switch.setItemText(1, _translate("MainWindow", "sin(√(x² + z²))"))
self.func_switch.setItemText(2, _translate("MainWindow", "√|1 + z² - x²|"))
self.func_switch.setItemText(3, _translate("MainWindow", "x + z"))
self.groupBox.setTitle(_translate("MainWindow", "X"))
self.x_begin.setText(_translate("MainWindow", "-10"))
self.x_end.setText(_translate("MainWindow", "10"))
self.groupBox_2.setTitle(_translate("MainWindow", "Z"))
self.z_begin.setText(_translate("MainWindow", "-10"))
self.z_end.setText(_translate("MainWindow", "10"))
self.label_2.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt;\">y | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement different methods to hedge positions and measure the risk of a Zero
cupon bond portfolio
REFERENCE: <NAME>; <NAME>.; <NAME>., "Interest Rate Risk
Modeling, the fixed Income Valuation course". Wiley, 2005
@author: ucaiado
Created on 12/22/2016
"""
import numpy as np
import math
import pandas as pd
import pprint
'''
Begin help functions
'''
'''
End help functions
'''
def update_maxmin(f_frice, a):
    '''
    Keep the agent's running extremes up to date while it holds a position.
    NOTE(review): parameter is spelled `f_frice` in the original API; kept
    as-is so keyword callers do not break (presumably a typo for f_price).
    :param f_frice: float. last price observed
    :param a: agent object. exposes current_max_price / current_min_price
    '''
    a.current_max_price = max(a.current_max_price, f_frice)
    a.current_min_price = min(a.current_min_price, f_frice)
class RiskModel(object):
    '''
    A basic risk model representation for a fixed income strategy that measures
    the loss potential and the immunization needs of a portfolio of
    zero-coupon-style instruments (PU = face value discounted at the quoted
    rate over the remaining business days / 252)
    '''
    def __init__(self, env, f_portfolio_value=10**6):
        '''
        Initiate a RiskModel object. Save all parameters as attributes
        :param env: Environment object. the environment that uses this object
        :param f_portfolio_value*: float. the total portfolio value used to
            normalize positions into portfolio weights
        '''
        self.env = env
        # instruments available for hedging and the main instrument traded
        self.l_hedging_instr = env.l_hedge
        self.s_main = env.s_main_intrument
        self.l_ratios = []  # DV01 ratio of each hedge vs. the main instrument
        self.d_dv01 = {}  # DV01 by instrument symbol, filled by set_ratios()
        self.na_pu = None
        self.na_du = None
        self.f_portfolio_value = f_portfolio_value
        self.s_risk_model = 'BasicModel'
        # stop-trading state (set by users of this object; cleared by reset())
        self.b_stop_trading = False
        self.price_stop_buy = None
        self.price_stop_sell = None
    def reset(self):
        '''
        reset risk model parameters to use in a new simulation
        '''
        self.current_price = None
        self.b_stop_trading = False
        self.price_stop_buy = None
        self.price_stop_sell = None
        self.l_ratios = []
        self.na_pu = None
        self.na_du = None
    def set_ratios(self):
        '''
        Set the DV01 ratios of the pair between the main instrument and the
        others avaiable to hedging. Also caches each instrument's DV01 in
        self.d_dv01 and the times to maturity (in years) in self.l_du.
        '''
        # calculate the dv01 for each instrument
        d_aux = {}
        l_rtn = []
        l_du = []
        for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
            book_obj = self.env.order_matching.l_order_books[idx]
            f_du = self.env.l_du[self.env.order_matching.idx][idx]/252.  # years
            f_price, f_qty = book_obj.best_bid
            # sensitivity of the PU to a 1 bp move in the quoted rate
            f_dv01 = (f_du*10.)/(1. + f_price/100.)**(1. + f_du)
            d_aux[s_key] = f_dv01
            l_du.append(f_du)
        # calculate the ratio in relation to the main instrument
        self.d_dv01 = d_aux
        for s_instr in self.l_hedging_instr:
            l_rtn.append(d_aux[s_instr]/d_aux[self.s_main])
        self.l_du = l_du
        return l_rtn
    def portfolio_duration(self, d_position):
        '''
        Return the duration of a portfolio
        :param d_position: dictionary. portfolio to be hedged
        '''
        l_pu = []
        l_pos = []
        l_du = []
        # cache the inputs used so _get_instruments_to_hedge can reuse them
        self.last_pu = {}
        self.last_pos = {}
        self.last_du = {}
        for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
            book_obj = self.env.order_matching.l_order_books[idx]
            f_du = self.env.l_du[self.env.order_matching.idx][idx]
            f_price, f_qty = book_obj.best_bid
            # PU: 10^5 face value discounted at the best-bid rate
            f_pu = 10.**5/(1. + f_price/100.)**(f_du/252.)
            f_pos = -d_position[s_key]['qBid']  # invert so qty is in PU terms?
            f_pos -= -d_position[s_key]['qAsk']
            self.last_du[s_key] = f_du
            l_du.append(f_du)
            self.last_pos[s_key] = f_pos
            l_pos.append(f_pos)
            self.last_pu[s_key] = f_pu
            l_pu.append(f_pu)
        return self._get_duration(l_pu, l_du, l_pos)
    def _get_duration(self, l_pu, l_du, l_pos):
        '''
        Calculate the duration for a given position
        :param l_pu: list. PU (unit price) of each instrument
        :param l_du: list. days to maturity (business days) of each instrument
        :param l_pos: list. final position in each instrument traded
        '''
        na_weight = self._get_weights(l_pu, l_pos)
        # duration = sum of maturities (in years) weighted by portfolio weight
        return sum(np.array(l_du)/252. * na_weight)
    def _get_weights(self, l_pu, l_pos):
        '''
        Return the positions as portfolio weights
        :param l_pu: list. the PU of each instrument
        :param l_pos: list. final position in each instrument traded (in PU)
        '''
        na_weight = np.array(l_pu) * np.array(l_pos)
        na_weight /= self.f_portfolio_value
        return na_weight
    def get_instruments_to_hedge(self, agent):
        '''
        Return a list of tuples with the instruments and quantities that can be
        used to hedge a given portfolio
        :param agent: Agent object. agent that needs to hedge
        '''
        d_position = agent.position
        return self._get_instruments_to_hedge(d_position)
    def _get_instruments_to_hedge(self, d_position):
        '''
        Return a list of (action, instrument, qty) tuples that can be used to
        hedge a given portfolio; qty is expressed in rate terms (sign flipped)
        :param d_position: dictionary. portfolio in qty of contracts
        '''
        # check the ratios just once
        if not self.l_ratios:
            self.l_ratios = self.set_ratios()
        f_current_duration = self.portfolio_duration(d_position)
        # check where should hedge and what quantity
        f_main_pos = -d_position[self.s_main]['qBid']
        f_main_pos -= -d_position[self.s_main]['qAsk']
        l_hedged_position = []
        l_pos = [f_main_pos]
        l_du = [self.last_du[self.s_main]]
        l_pu = [self.last_pu[self.s_main]]
        for s_instr, f_ratio in zip(self.l_hedging_instr, self.l_ratios):
            if s_instr == self.s_main:
                # the main instrument is itself a hedge: just close it out
                s_action = 'BUY'
                if f_main_pos < 0:
                    s_action = 'SELL'
                if f_main_pos == 0:
                    return []
                return [(s_action, s_instr, f_main_pos)]
            f_aux_pos = -d_position[s_instr]['qBid']
            f_aux_pos -= -d_position[s_instr]['qAsk']
            l_hedged_position.append(f_aux_pos*f_ratio)
            l_pos.append(f_aux_pos)
            l_du.append(self.last_du[s_instr])
            l_pu.append(self.last_pu[s_instr])
        # net exposure in main-instrument equivalents, converted back to each
        # hedging instrument through its DV01 ratio
        f_main_position = f_main_pos + sum(np.array(l_hedged_position))
        na_to_hedge = np.array([f_main_position] * len(l_hedged_position))
        na_to_hedge /= np.array(self.l_ratios)
        na_sign = np.sign(na_to_hedge)
        na_mult = 5 * na_sign  # lots of 5 contracts, preserving direction
        if sum((abs(na_to_hedge)/5) < 1) != 0:
            # some qty is below one lot: round every qty up to a whole lot
            na_to_hedge = np.ceil(abs(na_to_hedge)/5).astype(int) * na_mult
        else:
            na_to_hedge = np.round(abs(na_to_hedge)/5).astype(int) * na_mult
        l_to_hedge = list(na_to_hedge)
        l_rtn = []
        for idx, s_instr in enumerate(self.l_hedging_instr):
            i_qty = -l_to_hedge[idx]
            if i_qty != 0:
                l_pos_aux = l_pos[:]
                l_pos_aux[idx+1] += i_qty  # idx+1: slot 0 is the main instr
                f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
                f_abs_dur = abs(f_future_duration)
                # if qty is not enough to decrease the duration, increase it
                if f_abs_dur > 1.2 and f_abs_dur < 3.:
                    i_qty *= 2
                elif f_abs_dur >= 3.:
                    i_qty *= 3
                l_pos_aux = l_pos[:]
                l_pos_aux[idx+1] += i_qty
                f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
                # recalculate all: only emit the order if it actually reduces
                # the absolute portfolio duration
                if abs(f_future_duration) < abs(f_current_duration):
                    # change to rate quantity
                    s_action = 'BUY'
                    if -i_qty < 0:
                        s_action = 'SELL'
                    l_rtn.append((s_action, s_instr, -i_qty))
        return l_rtn
class KRDModel(RiskModel):
'''
A key rate duration model representation that uses the KRDs selected to
decide what instruments sould be used in the immunization of a portfolio
'''
def __init__(self, env, l_krd, f_portfolio_value=10**6, s_kind='trava'):
'''
Initiate a KRDModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
:param l_krd: list. maturity of the key rates used, in years
:param f_portfolio_value*: float. The total
'''
super(KRDModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'KRDModel_{}'.format(s_kind)
self.l_krd = l_krd
self.df_ratios = None
self.l_cmm_target = ['DI1F19', 'DI1F21', 'DI1F23']
self.s_kind = s_kind
def portfolio_krd(self, d_position):
'''
Return a tuple with the key rate durations of a portfolio and all
information needed to recalculate it
:param d_position: dictionary. portfolio to be hedged
'''
# recover variables
f_facevalue = 10.**5
l_rates = []
l_pos = []
l_maturity = []
l_instrument = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
l_instrument.append(book_obj.s_instrument)
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
f_pos = -d_position[s_key]['qBid'] # inverto para qty em PU ?
f_pos -= -d_position[s_key]['qAsk']
l_maturity.append(f_du/252.)
l_pos.append(f_pos)
l_rates.append(f_price)
# get the key rate duration matrix
l_exp_pu = [f_facevalue * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_pu = [f_facevalue * (1.+f_rate/100)**(-f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_dPdYP = [f_facevalue * f_mat * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
df_krd = self.key_rates(l_dPdYP, l_exp_pu)
na_weights = self._get_weights(l_pu, l_pos)
df_exposure = self._get_krd_exposure(df_krd, na_weights)
t_rtn = (df_krd, na_weights, df_exposure, l_maturity, l_pos, l_pu,
l_instrument)
return t_rtn
def _get_krd_exposure(self, df_krd, na_weights):
'''
Return the exposure in KRDs based on krds passed and weights
:param df_krd: data frame. KRD of the instruments traded
:param na_weights: numpy array. the weight in portfolio of eack KRD
'''
df_exposure = pd.Series(df_krd.T.dot(na_weights))
df_exposure.index = self.l_krd
return df_exposure
def key_rates(self, l_dPdYP, l_pu):
'''
Return the matrix of key rates durations for the instruments traded
in the environment
:param l_dPdYP: list. $\frac{dP * P}{dY}$
:param l_pu: list. PU of aeach contract
'''
# add up the linear contributions $s(t, t_i)\$ for $i=1, 2, ..., m$ to
# obtain the change in the given zero-coupon rate $\Delta y(t)$
if isinstance(self.df_ratios, type(None)):
self._set_linear_contributions()
df = self.df_ratios
return df.apply(lambda x: x * np.array(l_dPdYP) / np.array(l_pu),
axis=0)
def get_target_krds(self, l_cmm, d_data, df_krd, s_kind='fly'):
'''
Rerturn the target krds pandas serties to be the same of a buttlerfly.
:param l_cmm: list. instruments used in the butterfly, ordered by matry
:param d_data: dictionary. maturity and PU of each instrument
:param s_kind*: string. the kind of target to return
'''
# calculate positions
if s_kind == 'fly':
f_Qm = 1. # quantity at the middle of the structure
f_alpha = (d_data[l_cmm[2]][1] * 1. - d_data[l_cmm[1]][1])
f_alpha /= (d_data[l_cmm[2]][1] / 1. - d_data[l_cmm[0]][1])
f_Qs = (f_Qm * f_alpha * d_data[l_cmm[1]][0]) / d_data[l_cmm[0]][0]
f_Ql = (f_Qm * (1 - f_alpha) * d_data[l_cmm[1]][0])
f_Ql /= d_data[l_cmm[2]][0]
l_pos = [-f_Qs, f_Qm, -f_Ql]
elif s_kind == 'trava':
l_pu = [d_data[s_key][0] for s_key in l_cmm]
l_mat = [d_data[s_key][1] for s_key in l_cmm]
l_pos = [0., 10, 0.]
na_weights = self._get_weights(l_pu, l_pos)
f_curr_duration = sum(np.array(l_mat) * na_weights)
l_pos_aux = []
for s_key in self.l_hedging_instr:
f_pu = d_data[s_key][0]
f_matr = d_data[s_key][1]
f_dur_aux = 5. * f_pu / self.f_portfolio_value * f_matr
f_unt = -f_curr_duration / f_dur_aux * 5.
l_pos_aux.append(f_unt)
l_pos = [l_pos_aux[0]/20.] + [1.] + [l_pos_aux[1]/20.]
# | |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelComposed,
cached_property,
)
def lazy_import():
    """Import every widget-definition model and publish it in this module's
    globals.

    Auto-generated by OpenAPI Generator — do not edit by hand. The imports
    are deferred into this function (rather than done at module top level),
    presumably to break circular imports between the generated model
    modules; verify against the generator template before relying on that.
    """
    from datadog_api_client.v1.model.alert_graph_widget_definition import AlertGraphWidgetDefinition
    from datadog_api_client.v1.model.alert_value_widget_definition import AlertValueWidgetDefinition
    from datadog_api_client.v1.model.change_widget_definition import ChangeWidgetDefinition
    from datadog_api_client.v1.model.check_status_widget_definition import CheckStatusWidgetDefinition
    from datadog_api_client.v1.model.distribution_widget_definition import DistributionWidgetDefinition
    from datadog_api_client.v1.model.event_stream_widget_definition import EventStreamWidgetDefinition
    from datadog_api_client.v1.model.event_timeline_widget_definition import EventTimelineWidgetDefinition
    from datadog_api_client.v1.model.free_text_widget_definition import FreeTextWidgetDefinition
    from datadog_api_client.v1.model.funnel_widget_definition import FunnelWidgetDefinition
    from datadog_api_client.v1.model.funnel_widget_definition_type import FunnelWidgetDefinitionType
    from datadog_api_client.v1.model.funnel_widget_request import FunnelWidgetRequest
    from datadog_api_client.v1.model.geomap_widget_definition import GeomapWidgetDefinition
    from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView
    from datadog_api_client.v1.model.group_widget_definition import GroupWidgetDefinition
    from datadog_api_client.v1.model.heat_map_widget_definition import HeatMapWidgetDefinition
    from datadog_api_client.v1.model.host_map_widget_definition import HostMapWidgetDefinition
    from datadog_api_client.v1.model.host_map_widget_definition_style import HostMapWidgetDefinitionStyle
    from datadog_api_client.v1.model.i_frame_widget_definition import IFrameWidgetDefinition
    from datadog_api_client.v1.model.image_widget_definition import ImageWidgetDefinition
    from datadog_api_client.v1.model.list_stream_widget_definition import ListStreamWidgetDefinition
    from datadog_api_client.v1.model.log_stream_widget_definition import LogStreamWidgetDefinition
    from datadog_api_client.v1.model.monitor_summary_widget_definition import MonitorSummaryWidgetDefinition
    from datadog_api_client.v1.model.note_widget_definition import NoteWidgetDefinition
    from datadog_api_client.v1.model.query_value_widget_definition import QueryValueWidgetDefinition
    from datadog_api_client.v1.model.scatter_plot_widget_definition import ScatterPlotWidgetDefinition
    from datadog_api_client.v1.model.service_map_widget_definition import ServiceMapWidgetDefinition
    from datadog_api_client.v1.model.service_summary_widget_definition import ServiceSummaryWidgetDefinition
    from datadog_api_client.v1.model.slo_widget_definition import SLOWidgetDefinition
    from datadog_api_client.v1.model.sunburst_widget_definition import SunburstWidgetDefinition
    from datadog_api_client.v1.model.sunburst_widget_legend import SunburstWidgetLegend
    from datadog_api_client.v1.model.table_widget_definition import TableWidgetDefinition
    from datadog_api_client.v1.model.table_widget_has_search_bar import TableWidgetHasSearchBar
    from datadog_api_client.v1.model.timeseries_widget_definition import TimeseriesWidgetDefinition
    from datadog_api_client.v1.model.timeseries_widget_legend_column import TimeseriesWidgetLegendColumn
    from datadog_api_client.v1.model.timeseries_widget_legend_layout import TimeseriesWidgetLegendLayout
    from datadog_api_client.v1.model.toplist_widget_definition import ToplistWidgetDefinition
    from datadog_api_client.v1.model.tree_map_color_by import TreeMapColorBy
    from datadog_api_client.v1.model.tree_map_group_by import TreeMapGroupBy
    from datadog_api_client.v1.model.tree_map_size_by import TreeMapSizeBy
    from datadog_api_client.v1.model.tree_map_widget_definition import TreeMapWidgetDefinition
    from datadog_api_client.v1.model.widget import Widget
    from datadog_api_client.v1.model.widget_axis import WidgetAxis
    from datadog_api_client.v1.model.widget_color_preference import WidgetColorPreference
    from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
    from datadog_api_client.v1.model.widget_event import WidgetEvent
    from datadog_api_client.v1.model.widget_event_size import WidgetEventSize
    from datadog_api_client.v1.model.widget_grouping import WidgetGrouping
    from datadog_api_client.v1.model.widget_horizontal_align import WidgetHorizontalAlign
    from datadog_api_client.v1.model.widget_image_sizing import WidgetImageSizing
    from datadog_api_client.v1.model.widget_layout_type import WidgetLayoutType
    from datadog_api_client.v1.model.widget_margin import WidgetMargin
    from datadog_api_client.v1.model.widget_marker import WidgetMarker
    from datadog_api_client.v1.model.widget_message_display import WidgetMessageDisplay
    from datadog_api_client.v1.model.widget_monitor_summary_sort import WidgetMonitorSummarySort
    from datadog_api_client.v1.model.widget_node_type import WidgetNodeType
    from datadog_api_client.v1.model.widget_service_summary_display_format import WidgetServiceSummaryDisplayFormat
    from datadog_api_client.v1.model.widget_size_format import WidgetSizeFormat
    from datadog_api_client.v1.model.widget_summary_type import WidgetSummaryType
    from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
    from datadog_api_client.v1.model.widget_tick_edge import WidgetTickEdge
    from datadog_api_client.v1.model.widget_time import WidgetTime
    from datadog_api_client.v1.model.widget_time_windows import WidgetTimeWindows
    from datadog_api_client.v1.model.widget_vertical_align import WidgetVerticalAlign
    from datadog_api_client.v1.model.widget_view_mode import WidgetViewMode
    from datadog_api_client.v1.model.widget_viz_type import WidgetVizType
    globals()["AlertGraphWidgetDefinition"] = AlertGraphWidgetDefinition
    globals()["AlertValueWidgetDefinition"] = AlertValueWidgetDefinition
    globals()["ChangeWidgetDefinition"] = ChangeWidgetDefinition
    globals()["CheckStatusWidgetDefinition"] = CheckStatusWidgetDefinition
    globals()["DistributionWidgetDefinition"] = DistributionWidgetDefinition
    globals()["EventStreamWidgetDefinition"] = EventStreamWidgetDefinition
    globals()["EventTimelineWidgetDefinition"] = EventTimelineWidgetDefinition
    globals()["FreeTextWidgetDefinition"] = FreeTextWidgetDefinition
    globals()["FunnelWidgetDefinition"] = FunnelWidgetDefinition
    globals()["FunnelWidgetDefinitionType"] = FunnelWidgetDefinitionType
    globals()["FunnelWidgetRequest"] = FunnelWidgetRequest
    globals()["GeomapWidgetDefinition"] = GeomapWidgetDefinition
    globals()["GeomapWidgetDefinitionView"] = GeomapWidgetDefinitionView
    globals()["GroupWidgetDefinition"] = GroupWidgetDefinition
    globals()["HeatMapWidgetDefinition"] = HeatMapWidgetDefinition
    globals()["HostMapWidgetDefinition"] = HostMapWidgetDefinition
    globals()["HostMapWidgetDefinitionStyle"] = HostMapWidgetDefinitionStyle
    globals()["IFrameWidgetDefinition"] = IFrameWidgetDefinition
    globals()["ImageWidgetDefinition"] = ImageWidgetDefinition
    globals()["ListStreamWidgetDefinition"] = ListStreamWidgetDefinition
    globals()["LogStreamWidgetDefinition"] = LogStreamWidgetDefinition
    globals()["MonitorSummaryWidgetDefinition"] = MonitorSummaryWidgetDefinition
    globals()["NoteWidgetDefinition"] = NoteWidgetDefinition
    globals()["QueryValueWidgetDefinition"] = QueryValueWidgetDefinition
    globals()["SLOWidgetDefinition"] = SLOWidgetDefinition
    globals()["ScatterPlotWidgetDefinition"] = ScatterPlotWidgetDefinition
    globals()["ServiceMapWidgetDefinition"] = ServiceMapWidgetDefinition
    globals()["ServiceSummaryWidgetDefinition"] = ServiceSummaryWidgetDefinition
    globals()["SunburstWidgetDefinition"] = SunburstWidgetDefinition
    globals()["SunburstWidgetLegend"] = SunburstWidgetLegend
    globals()["TableWidgetDefinition"] = TableWidgetDefinition
    globals()["TableWidgetHasSearchBar"] = TableWidgetHasSearchBar
    globals()["TimeseriesWidgetDefinition"] = TimeseriesWidgetDefinition
    globals()["TimeseriesWidgetLegendColumn"] = TimeseriesWidgetLegendColumn
    globals()["TimeseriesWidgetLegendLayout"] = TimeseriesWidgetLegendLayout
    globals()["ToplistWidgetDefinition"] = ToplistWidgetDefinition
    globals()["TreeMapColorBy"] = TreeMapColorBy
    globals()["TreeMapGroupBy"] = TreeMapGroupBy
    globals()["TreeMapSizeBy"] = TreeMapSizeBy
    globals()["TreeMapWidgetDefinition"] = TreeMapWidgetDefinition
    globals()["Widget"] = Widget
    globals()["WidgetAxis"] = WidgetAxis
    globals()["WidgetColorPreference"] = WidgetColorPreference
    globals()["WidgetCustomLink"] = WidgetCustomLink
    globals()["WidgetEvent"] = WidgetEvent
    globals()["WidgetEventSize"] = WidgetEventSize
    globals()["WidgetGrouping"] = WidgetGrouping
    globals()["WidgetHorizontalAlign"] = WidgetHorizontalAlign
    globals()["WidgetImageSizing"] = WidgetImageSizing
    globals()["WidgetLayoutType"] = WidgetLayoutType
    globals()["WidgetMargin"] = WidgetMargin
    globals()["WidgetMarker"] = WidgetMarker
    globals()["WidgetMessageDisplay"] = WidgetMessageDisplay
    globals()["WidgetMonitorSummarySort"] = WidgetMonitorSummarySort
    globals()["WidgetNodeType"] = WidgetNodeType
    globals()["WidgetServiceSummaryDisplayFormat"] = WidgetServiceSummaryDisplayFormat
    globals()["WidgetSizeFormat"] = WidgetSizeFormat
    globals()["WidgetSummaryType"] = WidgetSummaryType
    globals()["WidgetTextAlign"] = WidgetTextAlign
    globals()["WidgetTickEdge"] = WidgetTickEdge
    globals()["WidgetTime"] = WidgetTime
    globals()["WidgetTimeWindows"] = WidgetTimeWindows
    globals()["WidgetVerticalAlign"] = WidgetVerticalAlign
    globals()["WidgetViewMode"] = WidgetViewMode
    globals()["WidgetVizType"] = WidgetVizType
class WidgetDefinition(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {
"requests": {
"max_items": 1,
"min_items": 1,
},
"filters": {
"min_items": 1,
},
}
@cached_property
def openapi_types():
return {}
def __init__(self, *args, **kwargs):
"""WidgetDefinition - a model defined in OpenAPI
Keyword Args:
time (WidgetTime): [optional]
title (str): [optional] The title of the widget.
title_align (WidgetTextAlign): [optional]
title_size (str): [optional] The size of the title.
precision (int): [optional] Number of decimals to show. If not defined, the widget uses the raw value.
text_align (WidgetTextAlign): [optional]
unit (str): [optional] Unit to display with the value.
custom_links ([WidgetCustomLink]): [optional] List of custom links.
group ([str]): [optional] List of tag prefixes to group by.
group_by (TreeMapGroupBy): [optional]
tags ([str]): [optional] List of tags used to filter the groups reporting a cluster check.
legend_size (str): [optional] Available legend sizes for a widget. Should be one of \"0\", \"2\", \"4\", \"8\", \"16\", or \"auto\".
markers ([WidgetMarker]): [optional] List of markers.
show_legend (bool): [optional] Whether or not to display the legend on this widget.
xaxis (WidgetAxis): [optional]
yaxis (WidgetAxis): [optional]
event_size (WidgetEventSize): [optional]
tags_execution (str): [optional] The execution method for multi-value filters. Can be either and or or.
color (str): [optional] Color of the text.
font_size (str): [optional] Size of the text.
background_color (str): [optional] Background color of the note.
banner_img (str): [optional] URL of image to display as a banner for the group.
show_title (bool): [optional] Whether to show the title or not. If omitted the server will use the default value of True.
events ([WidgetEvent]): [optional] List of widget events.
no_group_hosts (bool): [optional] Whether to show the hosts that don’t fit in a group.
no_metric_hosts (bool): [optional] Whether to show the hosts with no metrics.
node_type (WidgetNodeType): [optional]
notes (str): [optional] Notes on the title.
scope ([str]): [optional] List of tags used to filter the map.
has_background (bool): [optional] Whether to display a background or not. If omitted the server will use the default value of True.
has_border (bool): [optional] Whether to display a border or not. If omitted the server will use the default value of True.
horizontal_align (WidgetHorizontalAlign): [optional]
margin (WidgetMargin): [optional]
sizing (WidgetImageSizing): [optional]
url_dark_theme (str): [optional] URL of the image in dark mode.
vertical_align (WidgetVerticalAlign): [optional]
columns ([str]): [optional] Which columns to display on the widget.
indexes ([str]): [optional] An array of index names to query in the stream. Use [] to query all indexes at once.
logset (str): [optional] ID of the log set to use.
message_display (WidgetMessageDisplay): [optional]
show_date_column (bool): [optional] Whether to show the date column or not
show_message_column (bool): [optional] Whether to show the message column or not
sort (WidgetMonitorSummarySort): [optional]
color_preference (WidgetColorPreference): [optional]
count (int): [optional] The number of monitors to display.
display_format (WidgetServiceSummaryDisplayFormat): [optional]
hide_zero_counts (bool): [optional] Whether to show counts of 0 or not.
show_last_triggered (bool): [optional] Whether to show the time that has elapsed since the monitor/group triggered.
start (int): [optional] The start of the list. Typically 0.
summary_type (WidgetSummaryType): [optional]
has_padding (bool): [optional] Whether to add padding or not. If omitted the server will use the default value of True.
show_tick (bool): [optional] Whether to show a tick or not.
tick_edge (WidgetTickEdge): [optional]
tick_pos (str): [optional] Where to position the tick on an edge.
autoscale (bool): [optional] Whether to use auto-scaling or not.
custom_unit (str): [optional] Display a unit of your choice on the widget.
color_by_groups ([str]): [optional] List of groups used for colors.
global_time_target (str): [optional] Defined global time target.
show_error_budget (bool): [optional] Defined error budget.
slo_id (str): [optional] ID of the SLO displayed.
time_windows ([WidgetTimeWindows]): [optional] Times being monitored.
view_mode (WidgetViewMode): [optional]
show_breakdown (bool): [optional] Whether to show the latency breakdown or not.
show_distribution (bool): [optional] Whether to show the latency distribution or not.
show_errors (bool): [optional] Whether to show the error metrics or not.
show_hits (bool): [optional] Whether to show the hits metrics or not.
show_latency (bool): [optional] Whether to show the latency metrics or not.
show_resource_list (bool): [optional] Whether to show the resource list or not.
size_format (WidgetSizeFormat): [optional]
hide_total (bool): [optional] Show the total value in this widget.
legend (SunburstWidgetLegend): [optional]
has_search_bar (TableWidgetHasSearchBar): [optional]
legend_columns ([TimeseriesWidgetLegendColumn]): [optional] Columns displayed in the legend.
legend_layout (TimeseriesWidgetLegendLayout): [optional]
right_yaxis (WidgetAxis): [optional]
color_by (TreeMapColorBy): [optional]
size_by (TreeMapSizeBy): [optional]
alert_id (str): [optional] ID of the alert to use in the widget.
type (FunnelWidgetDefinitionType): [optional]
viz_type (WidgetVizType): [optional]
requests ([FunnelWidgetRequest]): [optional] Request payload used to query items.
check (str): [optional] Name of the check to use in the widget.
grouping (WidgetGrouping): [optional]
query (str): [optional] Query to filter the monitors with.
text (str): [optional] Text to display.
style (HostMapWidgetDefinitionStyle): [optional]
view (GeomapWidgetDefinitionView): [optional]
layout_type (WidgetLayoutType): [optional]
widgets ([Widget]): [optional] List of widget groups.
url (str): [optional] URL of the image.
content (str): [optional] | |
py_v = py_v.lower()
return HammingDistance._compute(py_u, py_v, lambda x, y: x == y, evaluation)
# Common base for string-distance builtins; concrete metrics are supplied by
# subclasses via self._distance(u, v, same).
class _StringDistance(Builtin):
    options = {'IgnoreCase': 'False'}

    def apply(self, a, b, evaluation, options):
        '%(name)s[a_, b_, OptionsPattern[%(name)s]]'
        # NOTE: the string above looks like the framework's match pattern,
        # not documentation — keep it verbatim.
        if isinstance(a, String) and isinstance(b, String):
            # String vs. string: compare characters one by one.
            chars_a = a.get_string_value()
            chars_b = b.get_string_value()
            if options['System`IgnoreCase'] == SymbolTrue:
                if hasattr(str, 'casefold'):
                    def fold(c):
                        return unicodedata.normalize("NFKD", c.casefold())
                    chars_a = [fold(c) for c in chars_a]
                    chars_b = [fold(c) for c in chars_b]
                else:  # python2, PyPy
                    chars_a = chars_a.lower()
                    chars_b = chars_b.lower()
            return Integer(self._distance(chars_a, chars_b,
                                          lambda u, v: u == v))
        # List vs. list: compare elements by structural sameness.
        if a.get_head_name() == 'System`List' and b.get_head_name() == 'System`List':
            return Integer(self._distance(a.leaves, b.leaves,
                                          lambda u, v: u.same(v)))
        # Anything else: return the expression unevaluated.
        return Expression('EditDistance', a, b)
# Levenshtein's algorithm is defined by the following construction:
# (adapted from https://de.wikipedia.org/wiki/Levenshtein-Distanz)
#
# given two strings s1, s2, we build a matrix D sized (len(s1) + 1,
# len(s2) + 1) and fill it using the following rules:
#
# (1) D(0, 0) = 0
# (2) D(i, 0) = i, 1 <= i <= len(s1)
# (3) D(0, j) = j, 1 <= j <= len(s2)
# (4) D(i, j) = minimum of
# D(i - 1, j - 1) + 0 if s1(i) = s2(j)
# D(i - 1, j - 1) + 1 (substitution)
# D(i, j - 1) + 1 (insertion)
# D(i - 1, j) + 1 (deletion)
#
# The computed distance will be in D(len(s1), len(s2)), the bottom-right cell.
#
# note: double brackets indicate 1-based indices below, e.g. s1[[1]]
def _one_based(l): # makes an enumerated generator 1-based
return ((i + 1, x) for i, x in l)
def _prev_curr(l): # yields pairs of (x[i - 1], x[i]) for i in 1, 2, ...
prev = None
for curr in l:
yield prev, curr
prev = curr
def _levenshtein_d0(s2): # compute D(0, ...)
return list(range(len(s2) + 1)) # see (1), (3)
def _levenshtein_di(c1, s2, i, d_prev, same, cost): # compute one new row
# given c1 = s1[i], s2, i, d_prev = D(i - 1, ...), compute D(i, ...)
yield i # start with D(i, 0) = i, see (2)
d_curr_prev_j = i # d_curr_prev_j stores D(i, j - 1)
for j, c2 in _one_based(enumerate(s2)): # c2 = s2[[j]]
cond = (0 if same(c1, c2) else cost)
d_curr_j = min( # see (4)
d_prev[j - 1] + cond, # D(i - 1, j - 1) + cond; substitution
d_curr_prev_j + 1, # D(i, j - 1) + 1; insertion
d_prev[j] + 1) # D(i - 1, j) + 1; deletion
yield d_curr_j
d_curr_prev_j = d_curr_j
def _levenshtein(s1, s2, same):
    # Rolling-row Levenshtein: only the previous matrix row is kept in
    # memory; each element of s1 produces the next row.
    row = _levenshtein_d0(s2)
    for i, c1 in enumerate(s1, 1):  # c1 = s1[[i]]
        row = list(_levenshtein_di(c1, s2, i, row, same, 1))
    return row[-1]
def _damerau_levenshtein(s1, s2, same):
    # _damerau_levenshtein works like _levenshtein, except for one additional
    # rule covering transposition:
    #
    # if i > 1 and j > 1 and a[i] == b[j - 1] and a[i - 1] == b[j] then
    # D(i, j) = minimum(D(i, j), D(i - 2, j - 2) + transposition_cost)
    #
    # NOTE(review): keeping only the two previous rows implements the
    # "optimal string alignment" variant (a transposed pair cannot be edited
    # again) — presumably the intended metric; confirm against the doctests.
    def row(d_prev_prev, d_prev, i, prev_c1, c1, cost):
        # given c1 = s1[i], d_prev_prev = D(i - 2), d_prev = D(i - 1),
        # prev_c1 = s1[[i - 1]], c1 = s1[[i]], compute D(i, ...)
        for j, d_curr_j in enumerate(_levenshtein_di(c1, s2, i, d_prev, same, cost)):
            if i > 1 and j > 1:
                if same(c1, s2[j - 2]) and same(prev_c1, s2[j - 1]): # transposition?
                    # i.e. if s1[[i]] = s2[[j-1]] and s1[[i-1]] = s2[[j]]
                    d_curr_j = min(d_curr_j, d_prev_prev[j - 2] + cost)
            yield d_curr_j
    # d_prev_prev lags d_prev by one row; it is None during the first
    # iteration, which is safe because the i > 1 guard skips it then.
    d_prev_prev = None
    d_prev = _levenshtein_d0(s2)
    # _prev_curr supplies (s1[[i - 1]], s1[[i]]) pairs for the transposition test.
    for i, (prev_c1, c1) in _one_based(enumerate(_prev_curr(s1))):
        d_curr = list(row(d_prev_prev, d_prev, i, prev_c1, c1, 1))
        d_prev_prev = d_prev
        d_prev = d_curr
    return d_prev[-1]
def _levenshtein_like_or_border_cases(s1, s2, same, compute):
if len(s1) == len(s2) and all(same(c1, c2) for c1, c2 in zip(s1, s2)):
return 0
if len(s1) < len(s2):
s1, s2 = s2, s1
if len(s2) == 0:
return len(s1)
return compute(s1, s2, same)
class EditDistance(_StringDistance):
    """
    <dl>
    <dt>'EditDistance[$a$, $b$]'
        <dd>returns the Levenshtein distance of $a$ and $b$, which is defined as the minimum number of
        insertions, deletions and substitutions on the constituents of $a$ and $b$ needed to transform
        one into the other.
    </dl>
    >> EditDistance["kitten", "kitchen"]
    = 2
    >> EditDistance["abc", "ac"]
    = 1
    >> EditDistance["abc", "acb"]
    = 2
    >> EditDistance["azbc", "abxyc"]
    = 3
    The IgnoreCase option makes EditDistance ignore the case of letters:
    >> EditDistance["time", "Thyme"]
    = 3
    >> EditDistance["time", "Thyme", IgnoreCase -> True]
    = 2
    EditDistance also works on lists:
    >> EditDistance[{1, E, 2, Pi}, {1, E, Pi, 2}]
    = 2
    """
    # Argument handling (String vs. List, IgnoreCase) lives in _StringDistance;
    # this hook only selects the metric.
    def _distance(self, s1, s2, same):
        # Plain Levenshtein (insert/delete/substitute, each cost 1), with
        # equal/empty inputs short-circuited by the border-case helper.
        return _levenshtein_like_or_border_cases(s1, s2, same, _levenshtein)
class DamerauLevenshteinDistance(_StringDistance):
    """
    <dl>
    <dt>'DamerauLevenshteinDistance[$a$, $b$]'
        <dd>returns the Damerau-Levenshtein distance of $a$ and $b$, which is defined as the minimum number of
        transpositions, insertions, deletions and substitutions needed to transform one into the other.
        In contrast to EditDistance, DamerauLevenshteinDistance counts transposition of adjacent items (e.g.
        "ab" into "ba") as one operation of change.
    </dl>
    >> DamerauLevenshteinDistance["kitten", "kitchen"]
    = 2
    >> DamerauLevenshteinDistance["abc", "ac"]
    = 1
    >> DamerauLevenshteinDistance["abc", "acb"]
    = 1
    >> DamerauLevenshteinDistance["azbc", "abxyc"]
    = 3
    The IgnoreCase option makes DamerauLevenshteinDistance ignore the case of letters:
    >> DamerauLevenshteinDistance["time", "Thyme"]
    = 3
    >> DamerauLevenshteinDistance["time", "Thyme", IgnoreCase -> True]
    = 2
    DamerauLevenshteinDistance also works on lists:
    >> DamerauLevenshteinDistance[{1, E, 2, Pi}, {1, E, Pi, 2}]
    = 1
    """
    # Argument handling (String vs. List, IgnoreCase) lives in _StringDistance;
    # this hook only selects the metric.
    def _distance(self, s1, s2, same):
        # Levenshtein plus adjacent transposition at cost 1 (see
        # _damerau_levenshtein); border cases are short-circuited.
        return _levenshtein_like_or_border_cases(s1, s2, same, _damerau_levenshtein)
class RemoveDiacritics(Builtin):
    """
    <dl>
    <dt>'RemoveDiacritics[$s$]'
    <dd>returns a version of $s$ with all diacritics removed.
    </dl>
    >> RemoveDiacritics["en prononçant pêcher et pécher"]
    = en prononcant pecher et pecher
    >> RemoveDiacritics["piñata"]
    = pinata
    """
    def apply(self, s, evaluation):
        'RemoveDiacritics[s_String]'
        # Decompose accented characters (NFKD) so diacritics become separate
        # combining marks, then drop those marks by an ASCII round-trip.
        decomposed = unicodedata.normalize('NFKD', s.get_string_value())
        stripped = decomposed.encode('ascii', 'ignore').decode('ascii')
        return String(stripped)
class Transliterate(Builtin):
    """
    <dl>
    <dt>'Transliterate[$s$]'
    <dd>transliterates a text in some script into an ASCII string.
    </dl>
    # The following examples were taken from
    # https://en.wikipedia.org/wiki/Iliad,
    # https://en.wikipedia.org/wiki/Russian_language, and
    # https://en.wikipedia.org/wiki/Hiragana
    >> Transliterate["μήτηρ γάρ τέ μέ φησι θεὰ Θέτις ἀργυρόπεζα"]
    = meter gar te me phesi thea Thetis arguropeza
    >> Transliterate["Алекса́ндр Пу́шкин"]
    = <NAME>
    >> Transliterate["つかう"]
    = tsukau
    """
    # 'requires' declares the third-party dependency to the builtin machinery.
    requires = (
        'unidecode',
    )
    def apply(self, s, evaluation):
        'Transliterate[s_String]'
        # Imported lazily so the module loads even when unidecode is absent.
        from unidecode import unidecode
        ascii_text = unidecode(s.get_string_value())
        return String(ascii_text)
class StringTrim(Builtin):
    """
    <dl>
    <dt>'StringTrim[$s$]'
    <dd>returns a version of $s$ with whitespace removed from start and end.
    </dl>
    >> StringJoin["a", StringTrim[" \\tb\\n "], "c"]
    = abc
    >> StringTrim["ababaxababyaabab", RegularExpression["(ab)+"]]
    = axababya
    """
    def apply(self, s, evaluation):
        'StringTrim[s_String]'
        # NOTE(review): only space/tab/newline are stripped here — not \r or
        # \f; confirm that matches the intended whitespace definition.
        return String(s.get_string_value().strip(" \t\n"))
    def apply_pattern(self, s, patt, expression, evaluation):
        'StringTrim[s_String, patt_]'
        text = s.get_string_value()
        if not text:
            return s
        # Convert the Mathics pattern to a Python regex; on failure report
        # the standard invalid-string-expression message.
        py_patt = to_regex(patt, evaluation)
        if py_patt is None:
            return evaluation.message('StringExpression', 'invld', patt, expression)
        # Anchor the pattern at the string start for the left trim...
        if not py_patt.startswith(r'\A'):
            left_patt = r'\A' + py_patt
        else:
            left_patt = py_patt
        # ...and at the string end for the right trim.
        if not py_patt.endswith(r'\Z'):
            right_patt = py_patt + r'\Z'
        else:
            right_patt = py_patt
        # left = end of the leading match (0 when the pattern does not match
        # at the start); right = start of the trailing match (len(text) when
        # nothing matches up to the end). The slice keeps the middle.
        m = re.search(left_patt, text)
        left = m.end(0) if m else 0
        m = re.search(right_patt, text)
        right = m.start(0) if m else len(text)
        return String(text[left:right])
class StringInsert(Builtin):
"""
<dl>
<dt>'StringInsert["$string$", "$snew$", $n$]'
<dd>yields a string with $snew$ inserted starting at position $n$ in $string$.
<dt>'StringInsert["$string$", "$snew$", -$n$]'
<dd>inserts a at position $n$ from the end of "$string$".
<dt>'StringInsert["$string$", "$snew$", {$n_1$, $n_2$, ...}]'
<dd>inserts a copy of $snew$ at each position $n_i$ in $string$;
the $n_i$ are taken before any insertion is done.
<dt>'StringInsert[{$s_1$, $s_2$, ...}, "$snew$", $n$]'
    <dd>gives the list of results for each of the $s_i$.
</dl>
>> StringInsert["noting", "h", 4]
= nothing
#> StringInsert["abcdefghijklm", "X", 15]
: Cannot insert at position 15 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, 15]
#> StringInsert[abcdefghijklm, "X", 4]
: String or list of strings expected at position 1 in StringInsert[abcdefghijklm, X, 4].
= StringInsert[abcdefghijklm, X, 4]
#> StringInsert["abcdefghijklm", X, 4]
: String expected at position 2 in StringInsert[abcdefghijklm, X, 4].
= StringInsert[abcdefghijklm, X, 4]
#> StringInsert["abcdefghijklm", "X", a]
: Position specification a in StringInsert[abcdefghijklm, X, a] is not a machine-sized integer or a list of machine-sized integers.
= StringInsert[abcdefghijklm, X, a]
#> StringInsert["abcdefghijklm", "X", 0]
: Cannot insert at position 0 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, 0]
>> StringInsert["note", "d", -1]
= noted
>> StringInsert["here", "t", -5]
= | |
<filename>rt_viewer/GIRealTime.py
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\file GIRealTime.py
\copyright Copyright (c) 2019 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
\author <NAME> (<EMAIL>)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import sys
import math
import time
import argparse
import importlib
import os
import numpy as np
import ctypes
import OpenGL
OpenGL.ERROR_CHECKING = False
OpenGL.ERROR_LOGGING = False
from OpenGL import GL
import pygame as pg
from OpenGLUtils import ShaderLoader, MeshRenderer, Camera, FrameBuffer, EnvMap
from MeshHelpers import read_model, generate_rendering_buffers, sample_mesh
from TFRealTimeImpFast import tfImplementation
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, 'cuda_ops'))
import screenproc
FLOAT_SIZE = 4
INT_SIZE = 4
def current_milli_time():
    """Return the current wall-clock time in milliseconds (float).

    Replaces a lambda bound to a name (PEP 8 E731) with an equivalent def;
    callers are unaffected.
    """
    return time.time() * 1000.0
##################################################################### Rendering class and functions
class GLScene:
    def __init__(self, width, height, ptMin, ptMax, vertexs, faces, pts, normals, trainedModel,
        camParams, usePlane, objColor, sssParams, planeColor, convRadius, grow, pdRadius, gi, sss,
        envMap, lightInt, numVLP):
        """Build the complete real-time GI viewer state.

        Sets up the OpenGL pipeline (G-buffer shaders, floor/quad geometry,
        environment map with shadow maps, framebuffer + output texture), the
        camera (from camParams file or a default orbit), and the TensorFlow
        network wrapper plus its GL/CUDA interop buffers.

        Args (key ones):
            width, height: viewport size in pixels.
            ptMin, ptMax: object AABB corners (numpy arrays).
            vertexs, faces: mesh geometry; pts, normals: sampled point clouds
                (one set per frame/animation step — assumption, confirm).
            camParams: optional path to a 3-line CSV camera description.
            gi / sss: enable global illumination / subsurface scattering.
        """
        self.lighting_ = True
        self.grow_ = grow
        # Store gi usage
        self.gi_ = gi or sss  # SSS implies the GI path is needed
        self.sss_ = sss
        self.lightInt_ = lightInt
        self.useFloor_ = usePlane
        self.planeColor_ = planeColor
        self.objColor_ = objColor
        self.sssParams_ = sssParams
        # Configure OpenGL state.
        GL.glEnable(GL.GL_CULL_FACE)
        GL.glCullFace(GL.GL_BACK)
        GL.glFrontFace(GL.GL_CW)
        GL.glEnable(GL.GL_DEPTH_TEST)
        GL.glDepthMask(GL.GL_TRUE)
        GL.glDepthFunc(GL.GL_LEQUAL)
        # Create the camera.
        self.objCenterPt_ = (ptMax + ptMin)*0.5
        self.objPtMax_ = ptMax
        self.objPtMin_ = ptMin
        # aabbSize = diagonal length of the object's bounding box; used to
        # scale camera distance, clip planes, floor and env-map radius.
        aabbSize = math.sqrt(np.sum((ptMax-ptMin) ** 2))
        if camParams is None:
            self.camera_ = Camera(
                [0.0, 0.0, 0.0],
                [0.0, 0.0, -aabbSize*1.5],
                [0.0, 1.0, 0.0],
                float(width)/float(height),
                45.0, 0.1, aabbSize*5.0)
        else:
            # Camera file: three comma-separated rows (target, position, up)
            # — assumption based on the default branch above; confirm.
            valsCam = []
            with open(camParams, 'r') as paramsFile:
                for line in paramsFile:
                    lineElements = line.split(',')
                    valsCam.append([float(lineElements[0]),float(lineElements[1]),float(lineElements[2])])
            self.camera_ = Camera(
                valsCam[0],
                valsCam[1],
                valsCam[2],
                float(width)/float(height),
                45.0, 0.1, aabbSize*5.0)
        # NOTE(review): 'get_view_natrix' (sic) is the Camera API name as used
        # here and in update(); renaming would require changing Camera too.
        self.viewMat_ = self.camera_.get_view_natrix()
        self.projMat_ = self.camera_.get_projection_matrix()
        # Load the shaders.
        self.shaderLoader_ = ShaderLoader()
        self.shaderMesh_ = self.shaderLoader_.load_shader(
            ["shaders/meshVert.glsl", "shaders/meshFrag.glsl"],
            [GL.GL_VERTEX_SHADER, GL.GL_FRAGMENT_SHADER])
        # Uniform locations for the G-buffer (mesh) pass.
        self.viewMatrixUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "viewMatrix")
        self.projMatrixUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "projMatrix")
        self.textSMUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "shadowMapTex")
        self.camPosRenderUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "camPos")
        self.objColorUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "objColor")
        self.numVLPUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "numVLP")
        self.numVLPAxisUnif_ = GL.glGetUniformLocation(self.shaderMesh_, "numVLPAxis")
        self.shaderRender_ = self.shaderLoader_.load_shader(
            ["shaders/renderVert.glsl", "shaders/renderFrag.glsl"],
            [GL.GL_VERTEX_SHADER, GL.GL_FRAGMENT_SHADER])
        # Uniform locations for the final full-screen composition pass.
        self.textUnif_ = GL.glGetUniformLocation(self.shaderRender_, "colorTex")
        self.textPosUnif_ = GL.glGetUniformLocation(self.shaderRender_, "posTex")
        self.textNormalUnif_ = GL.glGetUniformLocation(self.shaderRender_, "normalTex")
        self.textMattUnif_ = GL.glGetUniformLocation(self.shaderRender_, "matTex")
        self.textLightUnif_ = GL.glGetUniformLocation(self.shaderRender_, "lightTex")
        self.directLightRenderUnif_ = GL.glGetUniformLocation(self.shaderRender_, "directLight")
        self.giRenderUnif_ = GL.glGetUniformLocation(self.shaderRender_, "gi")
        # Load the mesh.
        self.mesh_ = MeshRenderer(np.array(vertexs), np.array(faces), [3,3])
        # Add color to the normals for GI.
        # (the per-point albedo is appended after the normal so the network
        # input carries position + normal + color)
        if self.gi_:
            auxBuff = np.array([[self.objColor_[0], self.objColor_[1], self.objColor_[2]] for i in range(len(pts[0]))])
            finalNormals = []
            for i in range(len(pts)):
                finalNormals.append(np.concatenate((normals[i], auxBuff), axis=1))
            normals = np.array(finalNormals)
        #Add material properties for sss.
        if self.sss_:
            auxBuff = np.array([self.sssParams_ for i in range(len(pts[0]))])
            finalNormals = []
            for i in range(len(pts)):
                finalNormals.append(np.concatenate((normals[i], auxBuff), axis=1))
            normals = np.array(finalNormals)
        # Create the floor
        if self.useFloor_:
            # A single quad slightly below the object's lowest point,
            # centered on the object (coordinates are object-relative).
            self.floor_ = MeshRenderer(np.array([[-aabbSize*0.4, self.objPtMin_[1]-self.objCenterPt_[1]-0.005, -aabbSize*0.4, 0.0, 1.0, 0.0,
                aabbSize*0.4, self.objPtMin_[1]-self.objCenterPt_[1]-0.005, -aabbSize*0.4, 0.0, 1.0, 0.0,
                aabbSize*0.4, self.objPtMin_[1]-self.objCenterPt_[1]-0.005, aabbSize*0.4, 0.0, 1.0, 0.0,
                -aabbSize*0.4, self.objPtMin_[1]-self.objCenterPt_[1]-0.005, aabbSize*0.4, 0.0, 1.0, 0.0]]),
                np.array([0, 1, 2, 0, 2, 3]),
                [3,3])
            if not self.sss_:
                # Scatter random sample points on the floor so it also
                # participates in the point-based GI computation.
                planePts = []
                planeNormals = []
                for i in range(50000):
                    planePts.append([(np.random.random()- 0.5)*aabbSize*0.8, self.objPtMin_[1]-self.objCenterPt_[1], (np.random.random()- 0.5)*aabbSize*0.8])
                    if self.gi_:
                        planeNormals.append([0.0, 1.0, 0.0, self.planeColor_[0], self.planeColor_[1], self.planeColor_[2]])
                    else:
                        planeNormals.append([0.0, 1.0, 0.0])
                planePts = np.array(planePts)
                planeNormals = np.array(planeNormals)
                finalPts = []
                finalNormals = []
                for i in range(len(pts)):
                    finalPts.append(np.concatenate((pts[i], planePts), axis=0))
                    finalNormals.append(np.concatenate((normals[i], planeNormals), axis=0))
                pts = np.array(finalPts)
                normals = np.array(finalNormals)
        # Create the quad mesh.
        # (full-screen quad in NDC for the composition pass)
        self.quad_ = MeshRenderer(np.array([[-1.0, 1.0, 0.5,
            1.0, 1.0, 0.5,
            1.0, -1.0, 0.5,
            -1.0, -1.0, 0.5]]),
            np.array([0, 1, 2, 0, 2, 3]),
            [3])
        # Create environment map.
        self.envMap_ = EnvMap(envMap, self.shaderLoader_, aabbSize, lightInt, 8192, numVLP)
        self.envMap_.clear_shadow_map()
        self.envMap_.update_shadow_maps(self.mesh_)
        if self.useFloor_:
            self.envMap_.update_shadow_maps(self.floor_)
        # Create the frame buffer.
        # (four RGBA32F attachments: color, position, normal, material/light)
        self.frameBuffer_ = FrameBuffer([GL.GL_RGBA32F, GL.GL_RGBA32F, GL.GL_RGBA32F, GL.GL_RGBA32F], width, height)
        # Create output texture
        # (written by the CUDA kernels, read back in the composition pass)
        self.outTexture_ = GL.glGenTextures(1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.outTexture_)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
        GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA32F, width, height, 0, GL.GL_RGBA, GL.GL_FLOAT, None)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        # Resize viewport.
        self.width_ = width
        self.height_ = height
        GL.glViewport(0, 0, width, height)
        # Initialize mouse variables.
        self.lastRotated_ = False
        self.mouseX_ = 0.0
        self.mouseY_ = 0.0
        self.tfImplementation_ = tfImplementation(pts, normals, trainedModel,
            self.shaderLoader_, self.gi_, self.sss_, self.envMap_,
            convRadius, grow, pdRadius)
        self.init_tf_weights_buffers()
        self.init_abstract_feature_pts_buffers()
        # Running average bookkeeping for frame-time measurements.
        self.accumTime_ = 0.0
        self.accumTimeCounter_ = 0
def init_abstract_feature_pts_buffers(self):
pts, features, cellIndexs, aabbMin, aabbMax, _ = self.tfImplementation_.calculate_abstract_features()
self.gridSize_ = cellIndexs.shape[1]
pts = pts.flatten().tolist()
features = features.flatten().tolist()
cellIndexs = cellIndexs.flatten().tolist()
aabb = np.concatenate((aabbMin, aabbMax), axis=0).flatten().tolist()
self.ptsSSBO_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.ptsSSBO_)
ArrayType = GL.GLfloat*len(pts)
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(pts)*FLOAT_SIZE, ArrayType(*pts), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.featuresSSBO_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.featuresSSBO_)
ArrayType = GL.GLfloat*len(features)
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(features)*FLOAT_SIZE, ArrayType(*features), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.cellIndexsSSBO_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.cellIndexsSSBO_)
ArrayType = GL.GLuint*len(cellIndexs)
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(cellIndexs)*INT_SIZE, ArrayType(*cellIndexs), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.aabbSSBO_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.aabbSSBO_)
ArrayType = GL.GLfloat*len(aabb)
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(aabb)*FLOAT_SIZE, ArrayType(*aabb), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
mTexture1 = self.frameBuffer_.get_texture(0)
mTexture2 = self.frameBuffer_.get_texture(1)
screenproc.initInteroperabilityGLCUDA(self.width_, self.height_, int(mTexture1),
int(mTexture2), int(self.outTexture_), int(self.ptsSSBO_), int(self.featuresSSBO_), int(self.aabbSSBO_),
int(self.cellIndexsSSBO_), self.tfImplementation_.radius_)
if self.gi_:
screenproc.initInteroperabilityGLCUDAGI()
if self.sss_:
screenproc.initInteroperabilityGLCUDASSS(self.frameBuffer_.get_texture(2), self.frameBuffer_.get_texture(3),
self.sssParams_[0], self.sssParams_[1], self.sssParams_[2], self.sssParams_[3])
def init_tf_weights_buffers(self):
self.weightsConv1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.weightsConv1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[0])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[0])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[0]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.weightsConv2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.weightsConv2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[1])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[1])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[1]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.weightsConv3_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.weightsConv3_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[2])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[2])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[2]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.biasesConv1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.biasesConv1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[3])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[3])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[3]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.biasesConv2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.biasesConv2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[4])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[4])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[4]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.biasesConv3_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.biasesConv3_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[5])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[5])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[5]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.meanBN1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.meanBN1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[6])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[6])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[6]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.varianceBN1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.varianceBN1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[7])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[7])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[7]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.gammaBN1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.gammaBN1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[8])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[8])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[8]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.betaBN1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.betaBN1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[9])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[9])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[9]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.weightsMLP1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.weightsMLP1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[10])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[10])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[10]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.biasesMLP1_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.biasesMLP1_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[11])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[11])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[11]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.meanBN2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.meanBN2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[12])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[12])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[12]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.varianceBN2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.varianceBN2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[13])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[13])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[13]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.gammaBN2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.gammaBN2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[14])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[14])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[14]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.betaBN2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.betaBN2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[15])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[15])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[15]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.weightsMLP2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.weightsMLP2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[16])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[16])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[16]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
self.biasesMLP2_ = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, self.biasesMLP2_)
ArrayType = GL.GLfloat*len(self.tfImplementation_.networkWeights_[17])
GL.glBufferData(GL.GL_SHADER_STORAGE_BUFFER, len(self.tfImplementation_.networkWeights_[17])*FLOAT_SIZE,
ArrayType(*self.tfImplementation_.networkWeights_[17]), GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_SHADER_STORAGE_BUFFER, 0)
screenproc.initInteroperabilityGLCUDAConvWeights(self.grow_,
int(self.weightsConv1_), int(self.weightsConv2_), int(self.weightsConv3_),
int(self.biasesConv1_), int(self.biasesConv2_), int(self.biasesConv3_))
screenproc.initInteroperabilityGLCUDAMLPWeights(
int(self.meanBN1_), int(self.varianceBN1_), int(self.gammaBN1_), int(self.betaBN1_),
int(self.weightsMLP1_), int(self.biasesMLP1_),
int(self.meanBN2_), int(self.varianceBN2_) , int(self.gammaBN2_), int(self.betaBN2_),
int(self.weightsMLP2_), int(self.biasesMLP2_))
def update(self, rotate, mouseX, mouseY):
if rotate and self.lastRotated_:
self.camera_.rotate_x((mouseY-self.mouseY_)/500.0)
self.camera_.rotate_y((mouseX-self.mouseX_)/500.0)
self.viewMat_ = self.camera_.get_view_natrix()
self.mouseX_ = mouseX
self.mouseY_ = mouseY
self.lastRotated_ = rotate
    def display(self):
        """Render one frame: G-buffer pass -> CUDA neural pass -> composition.

        Pass 1 rasterizes the scene into the 4-attachment G-buffer, pass 2
        runs the CUDA network kernels that fill self.outTexture_, pass 3
        composites everything onto the default framebuffer via a
        full-screen quad.
        """
        GL.glEnable(GL.GL_CULL_FACE)
        GL.glEnable(GL.GL_DEPTH_TEST)
        # NOTE(review): clear color components are clamped to [0, 1] by GL,
        # so (100, 100, 100, 100) behaves like opaque white — confirm intent.
        GL.glClearColor(100,100,100,100)
        #Render G-buffer
        self.frameBuffer_.bind()
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glUseProgram(self.shaderMesh_)
        # Route the fragment outputs to the four G-buffer attachments.
        GL.glBindFragDataLocation(self.shaderMesh_, 0, "outputPos")
        GL.glBindFragDataLocation(self.shaderMesh_, 1, "outputNormal")
        GL.glBindFragDataLocation(self.shaderMesh_, 2, "outputMat")
        GL.glBindFragDataLocation(self.shaderMesh_, 3, "outputLight")
        GL.glUniformMatrix4fv(self.viewMatrixUnif_, 1, GL.GL_TRUE, np.ascontiguousarray(self.viewMat_, dtype=np.float32))
        GL.glUniformMatrix4fv(self.projMatrixUnif_, 1, GL.GL_TRUE, np.ascontiguousarray(self.projMat_, dtype=np.float32))
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.envMap_.frameBufferShadows_.get_shadow_map())
        GL.glUniform1i(self.textSMUnif_, 0)
        GL.glUniform3f(self.camPosRenderUnif_, self.camera_.obs_[0], self.camera_.obs_[1], self.camera_.obs_[2])
        GL.glUniform1i(self.numVLPUnif_, self.envMap_.numVPL_)
        GL.glUniform1i(self.numVLPAxisUnif_, self.envMap_.numPtsAxis_)
        # Virtual-point-light data (positions, intensities, matrices) as SSBOs.
        GL.glBindBufferBase(GL.GL_SHADER_STORAGE_BUFFER, 0, self.envMap_.vlpPosSSBO_)
        GL.glBindBufferBase(GL.GL_SHADER_STORAGE_BUFFER, 1, self.envMap_.vlpIntSSBO_)
        GL.glBindBufferBase(GL.GL_SHADER_STORAGE_BUFFER, 2, self.envMap_.vlpViewMatSSBO_)
        GL.glBindBufferBase(GL.GL_SHADER_STORAGE_BUFFER, 3, self.envMap_.vlpProjMatSSBO_)
        if self.useFloor_:
            GL.glUniform3f(self.objColorUnif_, self.planeColor_[0], self.planeColor_[1], self.planeColor_[2])
            self.floor_.render_mesh()
        GL.glUniform3f(self.objColorUnif_, self.objColor_[0], self.objColor_[1], self.objColor_[2])
        self.mesh_.render_mesh()
        GL.glUseProgram(0)
        GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
        #Compute NN
        # CUDA kernels consume the G-buffer and write self.outTexture_.
        screenproc.computeAOTexture(self.gridSize_)
        #Render result.
        GL.glDisable(GL.GL_CULL_FACE)
        GL.glDisable(GL.GL_DEPTH_TEST)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        attachments = [GL.GL_COLOR_ATTACHMENT0]
        GL.glDrawBuffers(1, attachments)
        GL.glUseProgram(self.shaderRender_)
        GL.glBindFragDataLocation(self.shaderRender_, 0, "outputColor")
        # Bind the CUDA output plus the G-buffer textures to units 0..4.
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.outTexture_)
        GL.glUniform1i(self.textUnif_, 0)
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.frameBuffer_.get_texture(0))
        GL.glUniform1i(self.textPosUnif_, 1)
        GL.glActiveTexture(GL.GL_TEXTURE2)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.frameBuffer_.get_texture(1))
        GL.glUniform1i(self.textNormalUnif_, 2)
        GL.glActiveTexture(GL.GL_TEXTURE3)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.frameBuffer_.get_texture(2))
        GL.glUniform1i(self.textMattUnif_, 3)
        GL.glActiveTexture(GL.GL_TEXTURE4)
        GL.glBindTexture(GL.GL_TEXTURE_2D,self.frameBuffer_.get_texture(3))
        GL.glUniform1i(self.textLightUnif_, 4)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        # directLight uniform: 0.0 when lighting_ is on, 1.0 otherwise
        # (shader-side meaning not visible here — confirm in renderFrag.glsl).
        if self.lighting_:
            GL.glUniform1f(self.directLightRenderUnif_, 0.0)
        else:
            GL.glUniform1f(self.directLightRenderUnif_, 1.0)
        # gi uniform is a (gi, sss) flag pair.
        if self.gi_:
            if self.sss_:
                GL.glUniform2f(self.giRenderUnif_, 1.0, 1.0)
            else:
                GL.glUniform2f(self.giRenderUnif_, 1.0, 0.0)
        else:
            GL.glUniform2f(self.giRenderUnif_, 0.0, 0.0)
        self.quad_.render_mesh()
        GL.glUseProgram(0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
##################################################################### MAIN
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to train GINN')
parser.add_argument('--in3DModel', help='3D input model')
parser.add_argument('--in3DModelScale', default=1.0, type=float, help='3DModelScale (default: 1.0)')
parser.add_argument('--inTrainedModel', default='../trained_networks/sss.ckpt', help='Input trained model (default: ../trained_networks/sss.ckpt)')
parser.add_argument('--model', default='MCGINetwork', help='model (default: MCGINetwork)')
parser.add_argument('--grow', default=8, type=int, help='Grow rate (default: 8)')
parser.add_argument('--camParams', default=None, help='Camera parameters file')
parser.add_argument('--usePlane', action='store_true', help='Use ground plane (default: False)')
parser.add_argument('--convRadius', default=0.05, type=float, help='Radius convolution (default: 0.05)')
parser.add_argument('--pdRadius', default=0.01, type=float, help='Radius poisson disk (default: 0.01)')
parser.add_argument('--lightIntensity', default=100.0, type=float, help='Light intensity (default: 100.0)')
parser.add_argument('--envMap', default='env_maps/spruit_sunrise_1k.hdr', help='model (default: envmaps/spruit_sunrise_1k.hdr)')
parser.add_argument('--numVLP', default=1024, type=int, help='Number of virtual point lights | |
sudo=sudo, directory=directory)
os.system(f"{Boolean(sudo).string(true='sudo ', false='')}mv {from_} {to_}")
def base(
    # the path (str, FilePath) (#1).
    path=None,
    # the dirs back.
    back=1,
):
    """Return the directory 'back' levels above 'path', with a trailing slash.

    Examples:
        base("/a/b/c")          -> "/a/b/"
        base("/a/b/c", back=2)  -> "/a/"
        base("a/b/c")           -> "a/b/"

    Bug fix: the previous implementation duplicated the leading slash for
    absolute paths (base("/a/b/c") returned "//a/b/").

    Raises:
        ValueError: when path is None or has no parent directory.
    """
    if path is None: raise ValueError("Define parameter: path:str.")
    # Normalize: collapse double slashes and drop a single trailing slash.
    normalized = str(path).replace('//', '/')
    if normalized[len(normalized) - 1] == '/': normalized = normalized[:-1]
    if len(normalized.split("/")) <= 1: raise ValueError("Path [{}] has no base.".format(normalized))
    # Keep every component except the last 'back' ones.
    parts = normalized.split("/")
    kept = parts[:max(len(parts) - back, 0)]
    if not kept:
        # Walked past the start of a relative path; mirror the old behavior.
        return ""
    if kept == ['']:
        # Walked up to the root of an absolute path.
        return "/"
    return "/".join(kept) + "/"
#
#
# the file object class.
class File(object):
    def __init__(self, path=None, data=None, load=False, default=None):
        # Wraps a text file: a FilePath plus its in-memory data.
        #   path:    file path; False means memory-only (no filesystem backing).
        #   data:    initial content; another Files.File is unwrapped to its data.
        #   load:    when True, read the file content immediately.
        #   default: content written to disk when the file does not exist yet.
        # docs.
        # NOTE(review): DOCS is never read in this method — presumably
        # consumed by external documentation tooling via source inspection;
        # confirm before removing.
        DOCS = {
            "module":"File",
            "initialized":False,
            "description":[],
            "chapter": "Defaults", }
        # check self instance.
        if isinstance(data, Files.File):
            data = data.data
        # init.
        # NOTE(review): '== False' (not 'is False') also matches path=0 —
        # presumably only False is ever passed; confirm.
        if path == False: self.file_path = self.fp = None # used in local memory (not fysical)
        else: self.file_path = self.fp = Formats.FilePath(path)
        self.data = data
        # Seed the file on disk with 'default' when it is missing.
        if default != None and not os.path.exists(self.file_path.path):
            self.save(data=default)
        if load: self.load()
        # can be filled with executing [self.x = x()]:
# can be filled with executing [self.x = x()]:
def load(self, default=None, sudo=False):
utils.__check_memory_only__(str(self.file_path.path))
if not os.path.exists(str(self.file_path.path)) and default != None:
self.save(data=default, sudo=sudo)
self.data = Files.load(self.file_path.path, format=str, sudo=sudo)
return self.data
def load_line(self, line_number, default=None, sudo=False):
utils.__check_memory_only__(self.file_path.path)
if not os.path.exists(self.file_path.path) and default != None:
self.save(str(default), self.file_path.path, sudo=sudo)
data = Files.load(self.file_path.path, format=str, sudo=sudo)
return data.split('\n')[line_number]
def save(self, data=None, path=None, overwrite_duplicates=True, sudo=False):
if path == None: path = self.file_path.path
if data == None: data = self.data
utils.__check_memory_only__(path)
if overwrite_duplicates:
self.data = data
return Files.save(path, data, sudo=sudo)
else:
file_name, original_path = Formats.FilePath(path).name(), path
extension = file_name.split('.')[file_name.count('.')]
file_name_without_extension = file_name.replace(extension, '')
while True:
if not os.path.exists(path): break
else: path = original_path.replace(file_name, file_name_without_extension+'-'+str(index)+extension)
self.data = data
return Files.save(path, data, sudo=sudo)
def check(self, default=None, save=True):
if default != None and isinstance(default, (str, String)):
if not self.fp.exists():
self.data = default
if save:
self.save(data=default)
# support default iteration.
def __iter__(self):
return iter(self.data)
# support '>=' & '>' operator.
def __gt__(self, string):
if not isinstance(string, str):
return len(self) > len(string)
elif not isinstance(string, self.__class__):
raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
return len(self) > len(string.data)
def __ge__(self, string):
if not isinstance(string, str):
return len(self) >= len(string)
elif not isinstance(string, self.__class__):
raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
return len(self) >= len(string.data)
# support '<=' & '<' operator.
def __lt__(self, string):
if not isinstance(string, str):
return len(self) < len(string)
elif not isinstance(string, self.__class__):
raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
return len(self) < len(string.data)
def __le__(self, string):
if not isinstance(string, str):
return len(self) <= len(string)
elif not isinstance(string, self.__class__):
raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
return len(self) <= len(string.data)
# support '==' & '!=' operator.
def __eq__(self, string):
if not isinstance(string, str):
return self.data == string
elif not isinstance(string, self.__class__):
return False
return self.data == string.data
def __ne__(self, string):
if not isinstance(string, str):
return self.data != string
elif not isinstance(string, self.__class__):
return True
return self.data != string.data
# support 'in' operator.
def __contains__(self, key):
if isinstance(key, (list, Files.Array)):
for i in key:
if i in self.data:
return True
return False
else:
return key in self.data
# str representation.
def __str__(self):
return str(self.data)
# content count.
def __len__(self):
return len(self.data)
# object id.
def __id__(self):
return f"({self.instance()}:{str(self)})"
# object instance.
def instance(self):
return "File"
#
# support self assignment.
def assign(self, data):
if isinstance(data, self.__class__):
data = data.data
self.data = data
return self
# return raw data.
def raw(self):
return self.data
#
# the array object class.
class Array(object):
	def __init__(self,
		# the array (param #1).
		array=[],
		# the path (param #2).
		path=False,
		# load the data on initialization.
		load=False,
		# the default array (will be created if file path does not exist).
		default=None,
	):
		"""List wrapper that can optionally be backed by a JSON file on disk.

		NOTE(review): the mutable default `array=[]` is shared between calls
		relying on the default; it is only reassigned (never mutated) here, but
		confirm no caller mutates it through self.array.
		"""
		# docs.
		DOCS = {
			"module":"Array",
			"initialized":False,
			"description":[],
			"chapter": "Defaults", }
		# check self instance: unwrap an Array, reject anything that is not a list.
		if isinstance(array, Files.Array):
			array = array.array
		elif not isinstance(array, list):
			raise Exceptions.InstanceError(f"Parameter [{self.__class__.__name__}.array] must be a [Array] or [list], not [{array.__class__.__name__}].")
		# initialize dictionary recursive.
		#new = []
		#for i in array: new.append(Formats.initialize(i))
		#array = new
		#if isinstance(array, Array):
		#	array = array.array
		# init.
		# path in [False, None] -> purely in-memory array (no file backing).
		if path in [False, None]:
			self.file_path = self.fp = None # used in local memory (not fysical)
			self.__path__ = None
		else:
			self.file_path = self.fp = Formats.FilePath(path)
			self.__path__ = self.file_path.path
		self.array = array
		# write the default to disk when a backing file was given but does not exist.
		if default != None and self.file_path != None and not os.path.exists(self.file_path.path):
			self.save(array=default)
			self.array = default
		if load: self.load()
#
# save to file.
def save(self, array=None, path=None, ensure_ascii=False, indent=4, sudo=False):
if array != None: array = self.array
if path == None: path = self.file_path.path
utils.__check_memory_only__(path)
self.array = array
return Files.save(path, Formats.denitialize(array), format="json", indent=indent, ensure_ascii=ensure_ascii, sudo=sudo)
# load from file.
def load(self, default=None, sudo=False):
utils.__check_memory_only__(self.file_path.path)
if not os.path.exists(self.file_path.path) and default != None:
self.save(default, sudo=sudo)
self.array = Files.load(self.file_path.path, format="json", sudo=sudo)
return self.array
# convert to string.
def string(self, joiner=" ", sum_first=False):
string = ""
for x in self.array:
if sum_first and string == "": string = joiner + str(x)
elif string == '': string = str(x)
else: string += joiner + str(x)
return str(string)
# divide into several arrays.
def divide(self, into=2):
avg = len(self.array) / float(into)
out = []
last = 0.0
while last < len(self.array):
out.append(self.array[int(last):int(last + avg)])
last += avg
if len(out) > into:
while len(out) > into:
last = out.pop(len(out)-1)
out[len(out)-1] += last
return out
# reomve indexes or values.
def remove(self, indexes=[], values=[]):
array = self.array
for i in indexes:
try: array.pop(i)
except: a=1
if values != []:
new = []
for v in array:
if v not in values: new.append(v)
array = new
return Array(array, path=self.__path__)
# default list functions.
def append(self, var):
array = list(self.array)
return Array(array.append(var), path=self.__path__)
def pop(self, index):
array = list(self.array)
return Array(array.pop(index), path=self.__path__)
def count(self, item=None):
if item == None:
return Formats.Integer(len(self.array))
elif isinstance(item, (str, Formats.String)):
c = 0
for i in self:
if i == item: c += 1
return Formats.Integer(c)
elif isinstance(item, (list, Files.Array)):
c = 0
for x in self:
for y in item:
if x == y: c += 1
return Formats.Integer(c)
else: raise Exceptions.InstanceError("Parameter [item] must either be None, String or Array.")
# check.
def check(self, default=None, save=True):
if default != None and isinstance(default, (list, Array)):
if not self.fp.exists():
self.array = default
if save:
self.save(data=default)
else:
for i in default:
if i not in self.array:
self.array.append(i)
if save:
self.save()
# clean content.
def clean(self,
# the string replacements.
# example:
# { "Hello":"hello" }
# [ ["Hello", "hello"] ]
replacements={},
# the first characters to remove (String & Array).
remove_first=[],
# the last characters to remove (String & Array).
remove_last=[],
# the first characters that are ensured (String & Array) (List: check is one of the list is ensured).
ensure_first=[],
# the last characters that are ensured (String & Array) (List: check is one of the list is ensured).
ensure_last=[],
# remove all values within the list from the array.
remove_values=[],
# update the self array.
update=True,
# the dicionary (leave None to use self.array).
array=None,
):
if array == None: array = list(self.array)
if isinstance(remove_first, (str, Formats.String)):
remove_first = [remove_first]
if isinstance(remove_last, (str, Formats.String)):
remove_last = [remove_last]
if isinstance(ensure_first, (str, Formats.String)):
ensure_first = [ensure_first]
if isinstance(ensure_last, (str, Formats.String)):
ensure_last = [ensure_last]
new = []
for item in list(array):
if item not in remove_values:
while True:
edits = False
for i in remove_first:
if len(item) >= len(i) and item[:len(i)] == i:
item = item[len(i):]
edits = True
for i in remove_last:
if len(item) >= len(i) and item[len(i):] == i:
item = item[:-len(i)]
edits = True
for i in ensure_first:
if len(item) >= len(i) and item[:len(i)] != i:
item = i+item
edits = True
for i in ensure_last:
if len(item) >= len(i) and item[len(i):] != i:
item += i
edits = True
for from_, to_ in replacements.items():
if isinstance(item, (str, Formats.String)) and from_ in item:
item = item.replace(from_, to_)
edits = True
if not edits: break
new.append(item)
return Array(new, path=self.__path__)
# iterations.
def iterate(self, sorted=False, reversed=False, array=None):
if array == None: array = list(self.array)
return self.items(reversed=reversed, sorted=sorted, array=array)
# iterate items.
def items(self, sorted=False, reversed=False, array=None):
if array == None: array = list(self.array)
if sorted: array = self.sort(array=array)
if reversed: return self.reversed(array=array)
else: return Array(array, path=self.__path__)
# reserse array.
def reversed(self, array=None):
if array == None: array = self.array
reversed_keys = []
c = len(array)-1
for _ in range(len(array)):
reversed_keys.append(array[c])
c -= 1
return Array(reversed_keys, path=self.__path__)
# sort array.
def sort(self, reversed=False, array=None):
if array == None: array = self.array
return Array(sorted(array, reverse=reversed), path=self.__path__)
# dump json string.
def json(self, sorted=False, reversed=False, indent=4, array=None, ):
#return json.dumps(Formats.denitialize(self), indent=indent)
if array == None: array = self.array
return json.dumps(self.serialize(json=False, sorted=sorted, reversed=reversed, array=array), indent=indent)
# serialize array.
def serialize(self, sorted=False, reversed=False, json=False, array=None):
if array == None: array = self.array
if isinstance(array, Files.Array):
array = array.array
if sorted:
items = self.items(reversed=reversed, array=self.sort(alphabetical=True, array=array))
else:
items = self.items(reversed=reversed, array=array)
new = []
for value in items:
if isinstance(value, (dict, Files.Dictionary)):
value = Files.Dictionary().serialize(json=json, sorted=sorted, reversed=reversed, dictionary=value)
elif isinstance(value, (list, Files.Array)):
value = self.serialize(json=json, sorted=sorted, reversed=reversed, array=value)
elif isinstance(value, object):
value = str(value)
elif isinstance(value, str) or isinstance(value, bool) or value == None:
if value in [True, "True", "True".lower()]:
if json:
value = "true"
else:
value = True
elif value in [False, "False", "False".lower()]:
if json:
value = "false"
else:
value = False
elif value in [None, "None", "None".lower()]:
if json:
value = "null"
else:
value = None
new.append(value)
return new
# randomize the content of the array always non recursive.
def randomize(self,
# optionally pass the array (leave None to use self.array).
array=None,
):
if array == None: array = list(self.array)
randomized = []
while len(array) > 0:
index = random.randrange(0, len(array))
item = array.pop(index)
randomized.append(item)
return Array(randomized, path=self.__path__)
#
# limit the content of the array.
def limit(self,
# limit to the number of samples.
limit:int,
# the index to start from.
start=0,
# optionally pass the array (leave None to use self.array).
array=None,
):
if array == None: array = list(self.array)
return Array(array[start:start+limit], path=self.__path__)
# min of numerical array.
def min(self):
min = self.array[0]
for item in self.array:
if item < min:
min = item
return min
# max of numerical array.
def max(self):
max = self.array[0]
for item in self.array:
if item > max:
max = item
return max
# sum numerical array.
def sum(self):
return sum(self.array)
# mean of numerical array.
def | |
<filename>reana_server/rest/workflows.py
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Reana-Server workflow-functionality Flask-Blueprint."""
import json
import logging
import traceback
import requests
from bravado.exception import HTTPError
from flask import Blueprint, Response
from flask import current_app as app
from flask import jsonify, request, stream_with_context
from reana_commons.errors import REANAQuotaExceededError, REANAValidationError
from reana_commons.operational_options import validate_operational_options
from reana_commons.yadage import yadage_load_from_workspace
from reana_db.database import Session
from reana_db.models import (
InteractiveSessionType,
ResourceType,
ResourceUnit,
RunStatus,
UserResource,
Workflow,
)
from reana_db.utils import _get_workflow_with_uuid_or_name, get_default_quota_resource
from webargs import fields, validate
from webargs.flaskparser import use_kwargs
from werkzeug.datastructures import Headers
from reana_server.api_client import (
current_rwc_api_client,
current_workflow_submission_publisher,
)
from reana_server.complexity import (
estimate_complexity,
get_workflow_min_job_memory,
)
from reana_server.decorators import check_quota, signin_required
from reana_server.status import NodesStatus
from reana_server.utils import (
RequestStreamWithLen,
_get_reana_yaml_from_gitlab,
clone_workflow,
is_uuid_v4,
)
try:
from urllib import parse as urlparse
except ImportError:
from urlparse import urlparse
blueprint = Blueprint("workflows", __name__)
@blueprint.route("/workflows", methods=["GET"])
@use_kwargs(
    {
        "page": fields.Int(validate=validate.Range(min=1)),
        "size": fields.Int(validate=validate.Range(min=1)),
        "include_progress": fields.Bool(location="query"),
        "include_workspace_size": fields.Bool(location="query"),
    }
)
@signin_required()
def get_workflows(user, **kwargs):  # noqa
    r"""Get all current workflows in REANA.
    ---
    get:
      summary: Returns list of all current workflows in REANA.
      description: >-
        This resource return all current workflows in JSON format.
      operationId: get_workflows
      produces:
        - application/json
      parameters:
        - name: access_token
          in: query
          description: The API access_token of workflow owner.
          required: false
          type: string
        - name: type
          in: query
          description: Required. Type of workflows.
          required: true
          type: string
        - name: verbose
          in: query
          description: Optional flag to show more information.
          required: false
          type: boolean
        - name: search
          in: query
          description: Filter workflows by name.
          required: false
          type: string
        - name: sort
          in: query
          description: Sort workflows by creation date (asc, desc).
          required: false
          type: string
        - name: status
          in: query
          description: Filter workflows by list of statuses.
          required: false
          type: array
          items:
            type: string
        - name: page
          in: query
          description: Results page number (pagination).
          required: false
          type: integer
        - name: size
          in: query
          description: Number of results per page (pagination).
          required: false
          type: integer
        - name: include_progress
          in: query
          description: Include progress information of the workflows.
          type: boolean
        - name: include_workspace_size
          in: query
          description: Include size information of the workspace.
          type: boolean
      responses:
        200:
          description: >-
            Request succeeded. The response contains the list of all workflows.
          schema:
            type: object
            properties:
              total:
                type: integer
              items:
                type: array
                items:
                  type: object
                  properties:
                    id:
                      type: string
                    name:
                      type: string
                    status:
                      type: string
                    size:
                      type: object
                      properties:
                        raw:
                          type: number
                        human_readable:
                          type: string
                    user:
                      type: string
                    created:
                      type: string
                    progress:
                      type: object
          examples:
            application/json:
              [
                {
                  "id": "256b25f4-4cfb-4684-b7a8-73872ef455a1",
                  "name": "mytest.1",
                  "status": "running",
                  "size":{
                    "raw": 10490000,
                    "human_readable": "10 MB"
                  },
                  "user": "00000000-0000-0000-0000-000000000000",
                  "created": "2018-06-13T09:47:35.66097",
                },
                {
                  "id": "3c9b117c-d40a-49e3-a6de-5f89fcada5a3",
                  "name": "mytest.2",
                  "status": "finished",
                  "size":{
                    "raw": 12580000,
                    "human_readable": "12 MB"
                  },
                  "user": "00000000-0000-0000-0000-000000000000",
                  "created": "2018-06-13T09:47:35.66097",
                },
                {
                  "id": "72e3ee4f-9cd3-4dc7-906c-24511d9f5ee3",
                  "name": "mytest.3",
                  "status": "created",
                  "size":{
                    "raw": 184320,
                    "human_readable": "180 KB"
                  },
                  "user": "00000000-0000-0000-0000-000000000000",
                  "created": "2018-06-13T09:47:35.66097",
                },
                {
                  "id": "c4c0a1a6-beef-46c7-be04-bf4b3beca5a1",
                  "name": "mytest.4",
                  "status": "created",
                  "size": {
                    "raw": 1074000000,
                    "human_readable": "1 GB"
                  },
                  "user": "00000000-0000-0000-0000-000000000000",
                  "created": "2018-06-13T09:47:35.66097",
                }
              ]
        400:
          description: >-
            Request failed. The incoming payload seems malformed.
        403:
          description: >-
            Request failed. User is not allowed to access workflow.
          examples:
            application/json:
              {
                "message": "User 00000000-0000-0000-0000-000000000000
                            is not allowed to access workflow
                            256b25f4-4cfb-4684-b7a8-73872ef455a1"
              }
        404:
          description: >-
            Request failed. User does not exist.
          examples:
            application/json:
              {
                "message": "User 00000000-0000-0000-0000-000000000000 does not
                            exist."
              }
        500:
          description: >-
            Request failed. Internal controller error.
          examples:
            application/json:
              {
                "message": "Something went wrong."
              }
    """
    try:
        # Plain query-string filters; the paginated args (page/size/...) arrive
        # already validated through use_kwargs as **kwargs.
        type_ = request.args.get("type", "batch")
        search = request.args.get("search")
        sort = request.args.get("sort", "desc")
        status = request.args.getlist("status")
        # "verbose" arrives as a "true"/"false" string; json.loads maps it to a
        # bool (anything else raises JSONDecodeError -> handled as 400 below).
        verbose = json.loads(request.args.get("verbose", "false").lower())
        # Proxy the query to reana-workflow-controller through the bravado client.
        response, http_response = current_rwc_api_client.api.get_workflows(
            user=str(user.id_),
            type=type_,
            search=search,
            sort=sort,
            status=status or None,
            verbose=bool(verbose),
            **kwargs,
        ).result()
        return jsonify(response), http_response.status_code
    except HTTPError as e:
        # Error propagated from the controller: forward its body and status code.
        logging.error(traceback.format_exc())
        return jsonify(e.response.json()), e.response.status_code
    except json.JSONDecodeError:
        # Raised by the "verbose" parsing above.
        logging.error(traceback.format_exc())
        return jsonify({"message": "Your request contains not valid JSON."}), 400
    except ValueError as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 403
    except Exception as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 500
@blueprint.route("/workflows", methods=["POST"])
@signin_required(include_gitlab_login=True)
@check_quota
def create_workflow(user):  # noqa
    r"""Create a workflow.
    ---
    post:
      summary: Creates a new workflow based on a REANA specification file.
      description: >-
        This resource is expecting a REANA specification in JSON format with
        all the necessary information to instantiate a workflow.
      operationId: create_workflow
      consumes:
        - application/json
      produces:
        - application/json
      parameters:
        - name: workflow_name
          in: query
          description: Name of the workflow to be created. If not provided
            name will be generated.
          required: true
          type: string
        # probably need to rename this to something more specific
        - name: spec
          in: query
          description: Remote repository which contains a valid REANA
            specification.
          required: false
          type: string
        - name: reana_specification
          in: body
          description: REANA specification with necessary data to instantiate
            a workflow.
          required: false
          schema:
            type: object
        - name: access_token
          in: query
          description: The API access_token of workflow owner.
          required: false
          type: string
      responses:
        201:
          description: >-
            Request succeeded. The workflow has been created.
          schema:
            type: object
            properties:
              message:
                type: string
              workflow_id:
                type: string
              workflow_name:
                type: string
          examples:
            application/json:
              {
                "message": "The workflow has been successfully created.",
                "workflow_id": "cdcf48b1-c2f3-4693-8230-b066e088c6ac",
                "workflow_name": "mytest.1"
              }
        400:
          description: >-
            Request failed. The incoming payload seems malformed
        403:
          description: >-
            Request failed. User is not allowed to access workflow.
          examples:
            application/json:
              {
                "message": "User 00000000-0000-0000-0000-000000000000
                            is not allowed to access workflow
                            256b25f4-4cfb-4684-b7a8-73872ef455a1"
              }
        404:
          description: >-
            Request failed. User does not exist.
          examples:
            application/json:
              {
                "message": "User 00000000-0000-0000-0000-000000000000 does not
                            exist."
              }
        500:
          description: >-
            Request failed. Internal controller error.
        501:
          description: >-
            Request failed. Not implemented.
    """
    try:
        if request.json:
            if "object_kind" in request.json:
                # GitLab push-webhook payload: fetch reana.yaml from the repository.
                (
                    reana_spec_file,
                    git_url,
                    workflow_name,
                    git_branch,
                    git_commit_sha,
                ) = _get_reana_yaml_from_gitlab(request.json, user.id_)
                git_data = {
                    "git_url": git_url,
                    "git_branch": git_branch,
                    "git_commit_sha": git_commit_sha,
                }
            else:
                # validate against schema
                git_data = {}
                reana_spec_file = request.json
                workflow_name = ""
        elif request.args.get("spec"):
            return jsonify("Not implemented"), 501
        else:
            raise Exception(
                "Either remote repository or a reana spec need to \
                be provided"
            )
        # Bug fix: determine the engine for BOTH spec sources. It used to be
        # assigned only in the request-body branch, so every GitLab-triggered
        # creation crashed with a NameError (surfaced as a 500).
        workflow_engine = reana_spec_file["workflow"]["type"]
        if workflow_engine not in app.config["AVAILABLE_WORKFLOW_ENGINES"]:
            raise Exception("Unknown workflow type.")
        workflow_name = request.args.get("workflow_name", workflow_name)
        if is_uuid_v4(workflow_name):
            return jsonify({"message": "Workflow name cannot be a valid UUIDv4."}), 400
        workflow_dict = {
            "reana_specification": reana_spec_file,
            "workflow_name": workflow_name,
        }
        # Raises REANAValidationError (-> 400) for unknown operational options.
        workflow_dict["operational_options"] = validate_operational_options(
            workflow_engine, reana_spec_file.get("inputs", {}).get("options", {})
        )
        if git_data:
            workflow_dict["git_data"] = git_data
        response, http_response = current_rwc_api_client.api.create_workflow(
            workflow=workflow_dict, user=str(user.id_)
        ).result()
        if git_data:
            # GitLab-triggered runs are queued and submitted asynchronously.
            Workflow.update_workflow_status(
                Session, response["workflow_id"], RunStatus.queued
            )
            current_workflow_submission_publisher.publish_workflow_submission(
                user_id=str(user.id_),
                workflow_id_or_name=response["workflow_id"],
                parameters=request.json,
            )
        return jsonify(response), http_response.status_code
    except HTTPError as e:
        logging.error(traceback.format_exc())
        return jsonify(e.response.json()), e.response.status_code
    except (KeyError, REANAValidationError) as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 400
    except ValueError as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 403
    except Exception as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 500
@blueprint.route("/workflows/<workflow_id_or_name>/specification", methods=["GET"])
@signin_required()
def get_workflow_specification(workflow_id_or_name, user):  # noqa
    r"""Get workflow specification.
    ---
    get:
      summary: Get the specification used for this workflow run.
      description: >-
        This resource returns the REANA workflow specification used to start
        the workflow run. Resource is expecting a workflow UUID.
      operationId: get_workflow_specification
      produces:
        - application/json
      parameters:
        - name: access_token
          in: query
          description: API access_token of workflow owner.
          required: false
          type: string
        - name: workflow_id_or_name
          in: path
          description: Required. Analysis UUID or name.
          required: true
          type: string
      responses:
        200:
          description: >-
            Request succeeded. Workflow specification is returned.
          schema:
            type: object
          examples:
            application/json:
              {
                "inputs": {
                  "parameters": {
                    "helloworld": "code/helloworld.py",
                    "inputfile": "data/names.txt",
                    "outputfile": "results/greetings.txt",
                    "sleeptime": 0
                  }
                },
                "workflow": {
                  "specification": {
                    "steps": [
                      {
                        "commands": [
                          "echo 'Hello World!'"
                        ],
                        "environment": "busybox"
                      }
                    ]
                  },
                  "type": "serial"
                }
              }
        403:
          description: >-
            Request failed. User is not allowed to access workflow.
          examples:
            application/json:
              {
                "message": "User 00000000-0000-0000-0000-000000000000
                            is not allowed to access workflow
                            256b25f4-4cfb-4684-b7a8-73872ef455a1"
              }
        404:
          description: >-
            Request failed. User does not exist.
          examples:
            application/json:
              {
                "message": "Workflow cdcf48b1-c2f3-4693-8230-b066e088c6ac does
                            not exist"
              }
        500:
          description: >-
            Request failed. Internal controller error.
    """
    try:
        if not workflow_id_or_name:
            raise ValueError("workflow_id_or_name is not supplied")
        # Resolves UUID-or-name and enforces ownership (raises for foreign /
        # unknown workflows; surfaced through the handlers below).
        workflow = _get_workflow_with_uuid_or_name(workflow_id_or_name, str(user.id_))
        return (
            jsonify(
                {
                    "specification": workflow.reana_specification,
                    "parameters": workflow.input_parameters,
                }
            ),
            200,
        )
    except HTTPError as e:
        # Error propagated from the controller: forward its body and status code.
        logging.error(traceback.format_exc())
        return jsonify(e.response.json()), e.response.status_code
    except ValueError as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 403
    except Exception as e:
        logging.error(traceback.format_exc())
        return jsonify({"message": str(e)}), 500
@blueprint.route("/workflows/<workflow_id_or_name>/logs", methods=["GET"])
@use_kwargs(
{
"page": fields.Int(validate=validate.Range(min=1)),
"size": fields.Int(validate=validate.Range(min=1)),
| |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List, Union, Tuple
from macrosynergy.management.simulate_quantamental_data import make_qdf
from macrosynergy.management.shape_dfs import reduce_df
class NaivePnL:
"""Computes and collects illustrative PnLs with limited signal options and
disregarding transaction costs
:param <pd.Dataframe> df: standardized data frame with the following necessary
columns: 'cid', 'xcat', 'real_date' and 'value'.
:param <str> ret: return category.
:param <List[str]> sigs: signal categories.
:param <List[str]> cids: cross sections to be considered. Default is all in the
dataframe.
:param <str> start: earliest date in ISO format. Default is None and earliest date
in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date in df
is used.
:param <dict> blacklist: cross sections with date ranges that should be excluded
from the dataframe.
"""
    def __init__(self, df: pd.DataFrame, ret: str, sigs: List[str],
                 cids: List[str] = None,
                 start: str = None, end: str = None,
                 blacklist: dict = None):
        """Reduce the input dataframe to the return and signal categories and
        store the working state on the instance (see class docstring for the
        parameter descriptions)."""
        self.ret = ret
        self.sigs = sigs
        # keep only the return + signal categories and the standard columns.
        xcats = [ret] + sigs
        cols = ['cid', 'xcat', 'real_date', 'value']
        self.df, self.xcats, self.cids = reduce_df(df[cols], xcats, cids, start, end,
                                                   blacklist, out_all=True)
        self.df['real_date'] = pd.to_datetime(self.df['real_date'])
        self.pnl_names = []  # list for PnL names
        self.black = blacklist
    def make_pnl(self, sig: str, sig_op: str = 'zn_score_pan', pnl_name: str = None,
                 rebal_freq: str = 'daily', rebal_slip = 0, vol_scale: float = None,
                 min_obs: int = 252, iis: bool = True,
                 neutral: str = 'zero', thresh: float = None):
        # Todo: implement the four 'pass through arguments to make_zn_score()
        """Calculate daily PnL and add to the main dataframe of the class instance

        :param <str> sig: name of signal that is the basis for positioning. The signal
            is assumed to be recorded at the end of the day prior to position taking.
        :param <str> sig_op: signal transformation options; must be one of
            'zn_score_pan', 'zn_score_cs', or 'binary'.
            Default 'zn_score_pan' transforms raw signals into z-scores around zero value
            based on the whole panel.
            Option 'zn_score_cs' transforms signals to z-scores around zero based on
            cross-section alone.
            Option 'binary' transforms signals into uniform long/shorts (1/-1) across all
            sections.
            N.B.: zn-score here means standardized score with zero being the natural
            neutral level and standardization through division by mean absolute value.
        :param <str> pnl_name: name of the PnL to be generated and stored.
            Default is none, i.e. a default name is given.
            Previously calculated PnLs in the class will be overwritten. This means that
            if a set of PnLs is to be compared they require custom names.
        :param <str> rebal_freq: rebalancing frequency for positions according to signal
            must be one of 'daily' (default), 'weekly' or 'monthly'.
        :param <str> rebal_slip: rebalancing slippage in days. Default is 0, which means
            that it takes no extra day to rebalance the position and that the new position
            produces PnL from the first day after the signal has been recorded.
        :param <bool> vol_scale: ex-post scaling of PnL to annualized volatility given.
            This for comparative visualization and not out-of-sample. Default is none.
        :param <int> min_obs: the minimum number of observations required to calculate
            zn_scores. Default is 252.
            # Todo: implement in function
        :param <bool> iis: if True (default) zn-scores are also calculated for the initial
            sample period defined by min-obs, on an in-sample basis, to avoid losing history.
            # Todo: implement in function
        :param <str> neutral: method to determine neutral level. Default is 'zero'.
            Alternatives are 'mean' and "median".
            # Todo: implement in function
        :param <float> thresh: threshold value beyond which scores are winsorized,
            i.e. contained at that threshold. Therefore, the threshold is the maximum absolute
            score value that the function is allowed to produce. The minimum threshold is 1
            standard deviation.
            # Todo: implement in function
        """
        assert sig in self.sigs
        assert sig_op in ['zn_score_pan', 'zn_score_cs', 'binary']
        assert rebal_freq in ['daily', 'weekly', 'monthly']
        # wide frame: one column per category, indexed by (cid, real_date).
        dfx = self.df[self.df['xcat'].isin([self.ret, sig])]
        dfw = dfx.pivot(index=['cid', 'real_date'], columns='xcat', values='value')
        if sig_op == 'zn_score_pan':
            # Todo: below is in-sample; use make_zn_score() for oos calculation
            # Todo: pass through min_obs, iss, neutral, thresh
            sda = dfw[sig].abs().mean()
            dfw['psig'] = dfw[sig] / sda
        elif sig_op == 'zn_score_cs':  # zn-score based on cross-section alone
            # Todo: below is in-sample; use make_zn_score() for oos calculation
            # Todo: pass through min_obs, iss, neutral, thresh
            zn_score = lambda x: x / np.nanmean(np.abs(x))
            dfw['psig'] = dfw[sig].groupby(level=0).apply(zn_score)
        elif sig_op == 'binary':
            dfw['psig'] = np.sign(dfw[sig])
        # Signal for the following day explains the lag mechanism.
        dfw['psig'] = dfw['psig'].groupby(level=0).shift(1)  # lag explanatory 1 period
        dfw.reset_index(inplace=True)
        if rebal_freq != 'daily':
            dfw['year'] = dfw['real_date'].dt.year
            if rebal_freq == 'monthly':
                dfw['month'] = dfw['real_date'].dt.month
                rebal_dates = dfw.groupby(['cid', 'year', 'month'])['real_date'].\
                    min()  # rebalancing days are first of month
            if rebal_freq == 'weekly':
                # NOTE(review): Series.dt.week is deprecated/removed in newer
                # pandas (use dt.isocalendar().week) — confirm pinned version.
                dfw['week'] = dfw['real_date'].dt.week
                rebal_dates = dfw.groupby(['cid', 'year', 'week'])['real_date'].\
                    min()  # rebalancing days are first of week
            # forward-fill the signal recorded on rebalancing days, shifted by slippage.
            dfw['sig'] = np.nan
            dfw.loc[dfw['real_date'].isin(rebal_dates), 'sig'] = \
                dfw.loc[dfw['real_date'].isin(rebal_dates), 'psig']
            dfw['sig'] = dfw['sig'].fillna(method='ffill').shift(rebal_slip)
        # NOTE(review): for rebal_freq == 'daily' no 'sig' column is created above,
        # so this line would raise KeyError — confirm the daily path is exercised.
        dfw['value'] = dfw[self.ret] * dfw['sig']
        df_pnl = dfw.loc[:, ['cid', 'real_date', 'value']]  # cross-section PnLs
        df_pnl_all = df_pnl.groupby(['real_date']).sum()  # global PnL as sum
        df_pnl_all = df_pnl_all[df_pnl_all['value'].cumsum() != 0]  # trim early zeros
        df_pnl_all['cid'] = 'ALL'
        df_pnl_all = df_pnl_all.reset_index()[df_pnl.columns]  # columns as in df_pnl...
        # NOTE(review): DataFrame.append was removed in pandas 2.0 (use pd.concat).
        df_pnl = df_pnl.append(df_pnl_all)  # ... and append
        if vol_scale is not None:
            # leverage that scales the global PnL to the target annualized volatility.
            leverage = vol_scale * (df_pnl_all['value'].std() * np.sqrt(261))**(-1)
            df_pnl['value'] = df_pnl['value'] * leverage
        pnn = ('PNL_' + sig) if pnl_name is None else pnl_name  # set PnL name
        df_pnl['xcat'] = pnn
        if pnn in self.pnl_names:
            self.df = self.df[~(self.df['xcat'] == pnn)]  # remove any PnL with same name
        else:
            self.pnl_names = self.pnl_names + [pnn]
        self.df = self.df.append(df_pnl[self.df.columns]).reset_index(drop=True)
    def plot_pnls(self, pnl_cats: List[str], pnl_cids: List[str] = ['ALL'],
                  start: str = None, end: str = None, figsize: Tuple = (10, 6)):
        """Plot line chart of cumulative PnLs: a single PnL, multiple PnL
        categories for one cross section, or multiple cross sections for one
        PnL category.

        :param <List[str]> pnl_cats: list of PnL categories that should be plotted.
            If None, all PnL names stored on the instance are used.
        :param <List[str]> pnl_cids: list of cross sections to be plotted;
            default is 'ALL' (global PnL).
            Note: one can only have multiple PnL categories or multiple cross
            sections, not both.
        :param <str> start: earliest date in ISO format. Default is None and earliest
            date in df is used.
        :param <str> end: latest date in ISO format. Default is None and latest date
            in df is used.
        :param <Tuple> figsize: tuple of plot width and height. Default is (10,6).
        """
        if pnl_cats is None:
            pnl_cats = self.pnl_names
        # Either one category across cross sections, or one cross section
        # across categories -- never both multiple.
        assert (len(pnl_cats) == 1) | (len(pnl_cids) == 1)
        dfx = reduce_df(self.df, pnl_cats, pnl_cids, start, end, self.black,
                        out_all=False)
        sns.set_theme(style='whitegrid', palette='colorblind',
                      rc={'figure.figsize': figsize})
        if len(pnl_cids) == 1:
            # One cross section: one line per PnL category.
            dfx['cum_value'] = dfx.groupby('xcat').cumsum()
            ax = sns.lineplot(data=dfx, x='real_date', y='cum_value', hue='xcat',
                              estimator=None, lw=1)
            leg = ax.axes.get_legend()
            if len(pnl_cats) > 1:
                leg.set_title('PnL categories for ' + pnl_cids[0])
            else:
                leg.set_title('PnL category for ' + pnl_cids[0])
        else:
            # One PnL category: one line per cross section.
            dfx['cum_value'] = dfx.groupby('cid').cumsum()
            ax = sns.lineplot(data=dfx, x='real_date', y='cum_value', hue='cid',
                              estimator=None, lw=1)
            leg = ax.axes.get_legend()
            leg.set_title('Cross sections')
        plt.title('Cumulative naive PnL', fontsize=16)
        plt.xlabel('')
        plt.ylabel('% of risk capital, no compounding')
        plt.axhline(y=0, color='black', linestyle='--', lw=1)
        plt.show()
def evaluate_pnls(self, pnl_cats: List[str], pnl_cids: List[str] = ['ALL'],
start: str = None, end: str = None):
"""Small table of key PnL statistics
:param <List[str]> pnl_cats: list of PnL categories that should be plotted.
:param <List[str]> pnl_cids: list of cross sections to be plotted; default is
'ALL' (global PnL).
Note: one can only have multiple PnL categories or multiple cross sections,
not both.
:param <str> start: start date in format.
:param <str> start: earliest date in ISO format. Default is None and earliest
date in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date
in df is used.
:return: standardized dataframe with key PnL performance statistics
"""
if pnl_cats is None:
pnl_cats = self.pnl_names
assert (len(pnl_cats) == 1) | (len(pnl_cids) == 1)
dfx = reduce_df(self.df, pnl_cats, pnl_cids, start, end, self.black,
out_all=False)
groups = 'xcat' if len(pnl_cids) == 1 else 'cid'
stats = ['Return (pct ar)', 'St. Dev. (pct ar)', 'Sharpe ratio', 'Sortino ratio',
| |
# Copyright 2022 The Balsa Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Balsa.
Usage:
# Experiment configs are declared in experiments.py.
# Look up the name and pass --run <name>.
python -u run.py --run <name> 2>&1 | tee run.log
Use Main() to modify hparams for debugging.
"""
import collections
import copy
import logging
import os
import pickle
import pprint
import signal
import time
from absl import app
from absl import flags
import numpy as np
import pandas as pd
import psycopg2
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
import ray
import ray.util
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import lr_scheduler
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
import wandb
import balsa
from balsa import costing
from balsa import envs
from balsa import execution
from balsa import plan_analysis
from balsa.experience import Experience
from balsa.models.transformer import ReportModel
from balsa.models.transformer import Transformer
from balsa.models.transformer import TransformerV2
from balsa.models.treeconv import TreeConvolution
import balsa.optimizer as optim
from balsa.util import dataset as ds
from balsa.util import plans_lib
from balsa.util import postgres
import sim as sim_lib
import pg_executor
from pg_executor import dbmsx_executor
import train_utils
import experiments # noqa # pylint: disable=unused-import
FLAGS = flags.FLAGS
# Command-line flags: --run selects an experiment config declared in
# experiments.py; --local switches query execution to the local engine.
flags.DEFINE_string('run', 'Balsa_JOBRandSplit', 'Experiment config to run.')
flags.DEFINE_boolean('local', False,
                     'Whether to use local engine for query execution.')
def GetDevice():
    """Return the torch device string: 'cuda' when a GPU is available, else 'cpu'."""
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'
def Save(obj, path):
    """Pickle *obj* to *path*, creating parent directories as needed.

    Returns *path* for convenient chaining.
    """
    parent_dir = os.path.dirname(path)
    os.makedirs(parent_dir, exist_ok=True)
    with open(path, 'wb') as out_file:
        pickle.dump(obj, out_file)
    return path
def SaveText(text, path):
    """Atomically write *text* plus a trailing newline to *path*.

    The content is written to a sibling '.tmp' file first and then moved into
    place with os.replace(), so readers never observe a partially written
    file.  Parent directories are created as needed.  Returns *path*.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tmp_path = path + '.tmp'
    with open(tmp_path, 'w') as out_file:
        out_file.write(text + '\n')
    os.replace(tmp_path, path)
    return path
def MakeModel(p, exp, dataset):
    """Builds the value model selected by params ``p`` and moves it to device.

    Feature sizes are probed from the first node in the experience buffer.

    Args:
        p: experiment params; ``p.tree_conv`` selects TreeConvolution,
            otherwise a Transformer/TransformerV2 is built.
        exp: experience object providing featurizers and nodes
            (presumably an Experience instance -- TODO confirm).
        dataset: dataset whose ``costs`` tensor determines the number of
            label bins for cross-entropy training.

    Returns:
        A torch.nn.Module on the device reported by GetDevice().
    """
    dev = GetDevice()
    num_label_bins = int(
        dataset.costs.max().item()) + 2  # +1 for 0, +1 for ceil(max cost).
    # Probe feature dimensionality using an arbitrary (first) node.
    query_feat_size = len(exp.query_featurizer(exp.nodes[0]))
    batch = exp.featurizer(exp.nodes[0])
    assert batch.ndim == 1
    plan_feat_size = batch.shape[0]
    if p.tree_conv:
        # Regression uses a single output; classification one output per bin.
        labels = num_label_bins if p.cross_entropy else 1
        return TreeConvolution(feature_size=query_feat_size,
                               plan_size=plan_feat_size,
                               label_size=labels,
                               version=p.tree_conv_version).to(dev)
    else:
        plan_vocab_size = exp.featurizer.pad() + 1  # +1 for PAD.
        parent_pos_vocab_size = exp.pos_featurizer.pad() + 1
        # Fixed Transformer architecture hyperparameters.
        d_model = 256
        d_ff = 1024
        num_layers = 4
        num_heads = 4
        clazz = TransformerV2 if p.v2 else Transformer
        return clazz(
            plan_vocab_size,
            parent_pos_vocab_size,
            d_model,
            num_heads,
            d_ff,
            num_layers,
            d_query_feat=query_feat_size,
            plan_pad_idx=exp.featurizer.pad(),
            parent_pos_pad_idx=exp.pos_featurizer.pad(),
            use_pos_embs=p.pos_embs,
            dropout=p.dropout,
            cross_entropy=p.cross_entropy,
            max_label_bins=num_label_bins,
        ).to(dev)
@ray.remote
def ExecuteSql(query_name,
               sql_str,
               hint_str,
               hinted_plan,
               query_node,
               predicted_latency,
               curr_timeout_ms=None,
               found_plans=None,
               predicted_costs=None,
               silent=False,
               is_test=False,
               use_local_execution=True,
               plan_physical=True,
               repeat=1,
               engine='postgres'):
    """Executes a query.

    Only sql_str, hint_str, curr_timeout_ms, use_local_execution, repeat and
    engine affect execution; the remaining arguments are accepted (and
    deleted) so the signature mirrors ParseExecutionResult's.

    Returns:
        If use_local_execution:
            A (pg_executor, dbmsx_executor).Result.
        Else:
            A ray.ObjectRef of the above.
    """
    # Unused args.
    del query_name, hinted_plan, query_node, predicted_latency, found_plans,\
        predicted_costs, silent, is_test, plan_physical
    assert engine in ('postgres', 'dbmsx'), engine
    if engine == 'postgres':
        # geqo_off=True forces deterministic planning so the hint applies.
        return postgres.ExplainAnalyzeSql(sql_str,
                                          comment=hint_str,
                                          verbose=False,
                                          geqo_off=True,
                                          timeout_ms=curr_timeout_ms,
                                          remote=not use_local_execution)
    else:
        return DbmsxExecuteSql(sql_str,
                               comment=hint_str,
                               timeout_ms=curr_timeout_ms,
                               remote=not use_local_execution,
                               repeat=repeat)
def AddCommentToSql(sql_str, comment, engine):
    """Adds a comment (hint string) to a SQL string.

    Dispatches to the engine-specific implementation; an unknown engine name
    raises KeyError(engine), matching dict-lookup behavior.
    """
    if engine == 'postgres':
        handler = PostgresAddCommentToSql
    elif engine == 'dbmsx':
        handler = DbmsxAddCommentToSql
    else:
        raise KeyError(engine)
    return handler(sql_str, comment)
def PostgresAddCommentToSql(sql_str, comment=None):
    """Postgres: the hint comment goes on its own line above the statement."""
    return '\n'.join([comment, sql_str])
def DbmsxAddCommentToSql(sql_str, comment=None):
    """Stub: Dbmsx-specific hint commenting is not implemented here."""
    raise NotImplementedError
def DbmsxExecuteSql(sql_str,
                    comment=None,
                    timeout_ms=None,
                    remote=True,
                    repeat=1):
    """Stub: Dbmsx-specific query execution is not implemented here."""
    raise NotImplementedError
def DbmsxNodeToHintStr(node, with_physical_hints=False):
    """Converts a plans_lib.Node plan into Dbmsx-compatible hint string."""
    # Stub: Dbmsx support is not included in this codebase.
    raise NotImplementedError
def HintStr(node, with_physical_hints, engine):
    """Render a plan node as an engine-specific hint string."""
    if engine != 'postgres':
        assert engine == 'dbmsx', engine
        return DbmsxNodeToHintStr(node, with_physical_hints=with_physical_hints)
    return node.hint_str(with_physical_hints=with_physical_hints)
def ParseExecutionResult(result_tup,
                         query_name,
                         sql_str,
                         hint_str,
                         hinted_plan,
                         query_node,
                         predicted_latency,
                         curr_timeout_ms=None,
                         found_plans=None,
                         predicted_costs=None,
                         silent=False,
                         is_test=False,
                         use_local_execution=True,
                         plan_physical=True,
                         repeat=None,
                         engine='postgres'):
    """Parses an execution result and builds a human-readable log.

    Verifies that the hint (if any) was respected by the engine, and collects
    diagnostic messages about the executed, expert, and candidate plans.

    Returns:
        A 4-tuple (result_tup, real_cost, server_ip, message_string) where
        real_cost is result_tup.latency (dbmsx) or the plan JSON's
        'Execution Time' (postgres), and -1 on timeout.
    """
    del repeat  # Unused.
    messages = []
    result = result_tup.result
    has_timeout = result_tup.has_timeout
    server_ip = result_tup.server_ip
    if has_timeout:
        # A timed-out run must not carry a result payload.
        assert not result, result
    if engine == 'dbmsx':
        real_cost = -1 if has_timeout else result_tup.latency
    else:
        if has_timeout:
            real_cost = -1
        else:
            # Postgres EXPLAIN ANALYZE JSON payload.
            json_dict = result[0][0][0]
            real_cost = json_dict['Execution Time']
    if hint_str is not None:
        # Check that the hint has been respected. No need to check if running
        # baseline.
        do_hint_check = True
        if engine == 'dbmsx':
            raise NotImplementedError
        else:
            if not has_timeout:
                executed_node = postgres.ParsePostgresPlanJson(json_dict)
            else:
                # Timeout has occurred & 'result' is empty. Fallback to
                # checking against local Postgres.
                print('Timeout occurred; checking the hint against local PG.')
                executed_node, _ = postgres.SqlToPlanNode(sql_str,
                                                          comment=hint_str,
                                                          verbose=False)
            executed_node = plans_lib.FilterScansOrJoins(executed_node)
            executed_hint_str = executed_node.hint_str(
                with_physical_hints=plan_physical)
        if do_hint_check and hint_str != executed_hint_str:
            print('initial\n', hint_str)
            print('after\n', executed_hint_str)
            msg = 'Hint not respected for {}; server_ip={}'.format(
                query_name, server_ip)
            # NOTE(review): deliberate debugging trap -- a hint violation
            # prints the assertion message and drops into an ipdb shell
            # instead of crashing; unsuitable for unattended runs.
            try:
                assert False, msg
            except Exception as e:
                print(e, flush=True)
                import ipdb
                ipdb.set_trace()
    if not silent:
        messages.append('{}Running {}: hinted plan\n{}'.format(
            '[Test set] ' if is_test else '', query_name, hinted_plan))
        messages.append('filters')
        messages.append(pprint.pformat(query_node.info['all_filters']))
        messages.append('')
        messages.append('q{},{:.1f},{}'.format(query_node.info['query_name'],
                                               real_cost, hint_str))
        messages.append(
            '{} Execution time: {:.1f} (predicted {:.1f}) curr_timeout_ms={}'.
            format(query_name, real_cost, predicted_latency, curr_timeout_ms))
    if hint_str is None or silent:
        # Running baseline: don't print debug messages below.
        return result_tup, real_cost, server_ip, '\n'.join(messages)
    messages.append('Expert plan: latency, predicted, hint')
    expert_hint_str = query_node.hint_str()
    expert_hint_str_physical = query_node.hint_str(with_physical_hints=True)
    messages.append(' {:.1f} (predicted {:.1f}) {}'.format(
        query_node.cost, query_node.info['curr_predicted_latency'],
        expert_hint_str))
    if found_plans:
        if predicted_costs is None:
            predicted_costs = [None] * len(found_plans)
        messages.append('SIM-predicted costs, predicted latency, plan: ')
        # Cheapest candidate by predicted latency, for labeling below.
        min_p_latency = np.min([p_latency for p_latency, _ in found_plans])
        for p_cost, found in zip(predicted_costs, found_plans):
            p_latency, found_plan = found
            found_hint_str = found_plan.hint_str()
            found_hint_str_physical = HintStr(found_plan,
                                             with_physical_hints=True,
                                             engine=engine)
            # Tag each candidate: cheapest / matches expert / was picked.
            extras = [
                'cheapest' if p_latency == min_p_latency else '',
                '[expert plan]'
                if found_hint_str_physical == expert_hint_str_physical else '',
                '[picked]' if found_hint_str_physical == hint_str else ''
            ]
            extras = ' '.join(filter(lambda s: s, extras)).strip()
            if extras:
                extras = '<-- {}'.format(extras)
            if p_cost:
                messages.append(' {:.1f} {:.1f} {} {}'.format(
                    p_cost, p_latency, found_hint_str, extras))
            else:
                messages.append(' {:.1f} {} {}'.format(
                    p_latency, found_hint_str, extras))
    messages.append('-' * 80)
    return result_tup, real_cost, server_ip, '\n'.join(messages)
def _GetQueryFeaturizerClass(p):
    # p.sim_query_featurizer is overloaded: boolean values select between the
    # sim featurizer (True) and the plain one (False), while string values
    # name a specific featurizer class.  Unknown values raise KeyError.
    return {
        True: sim_lib.SimQueryFeaturizer,
        False: plans_lib.QueryFeaturizer,
        'SimQueryFeaturizerV2': sim_lib.SimQueryFeaturizerV2,
        'SimQueryFeaturizerV3': sim_lib.SimQueryFeaturizerV3,
        'SimQueryFeaturizerV4': sim_lib.SimQueryFeaturizerV4,
    }[p.sim_query_featurizer]
def TrainSim(p, loggers=None):
    """Builds, trains, and freezes the simulation (cost) model.

    Copies the relevant experiment params into a fresh Sim.Params, collects
    simulation data unless a checkpoint is provided, trains, then freezes the
    model and releases the training data.

    Args:
        p: experiment params (workload globs, cost model, search settings,
            training hyperparameters, optional sim_checkpoint).
        loggers: optional loggers forwarded to Sim.Train().

    Returns:
        The trained sim_lib.Sim with a frozen model.
    """
    sim_p = sim_lib.Sim.Params()
    # Copy over relevant params.
    sim_p.workload.query_dir = p.query_dir
    sim_p.workload.query_glob = p.query_glob
    sim_p.workload.test_query_glob = p.test_query_glob
    sim_p.workload.search_space_join_ops = p.search_space_join_ops
    sim_p.workload.search_space_scan_ops = p.search_space_scan_ops
    # Skip data collection for very large joins (threshold hard-coded here).
    sim_p.skip_data_collection_geq_num_rels = 12
    if p.cost_model == 'mincardcost':
        sim_p.search.cost_model = costing.MinCardCost.Params()
    else:
        sim_p.search.cost_model = costing.PostgresCost.Params()
    sim_p.query_featurizer_cls = _GetQueryFeaturizerClass(p)
    sim_p.plan_featurizer_cls = plans_lib.TreeNodeFeaturizer
    sim_p.infer_search_method = p.search_method
    sim_p.infer_beam_size = p.beam
    sim_p.infer_search_until_n_complete_plans = p.search_until_n_complete_plans
    if p.plan_physical:
        sim_p.plan_physical = True
        # Use a physical-aware plan featurizer.
        sim_p.plan_featurizer_cls = plans_lib.PhysicalTreeNodeFeaturizer
    sim_p.generic_ops_only_for_min_card_cost = \
        p.generic_ops_only_for_min_card_cost
    sim_p.label_transforms = p.label_transforms
    sim_p.tree_conv_version = p.tree_conv_version
    sim_p.loss_type = p.loss_type
    sim_p.gradient_clip_val = p.gradient_clip_val
    sim_p.bs = p.bs
    sim_p.epochs = p.epochs
    sim_p.perturb_query_features = p.perturb_query_features
    sim_p.validate_fraction = p.validate_fraction
    # Instantiate.
    sim = sim_lib.Sim(sim_p)
    # With a checkpoint, training data collection is skipped entirely.
    if p.sim_checkpoint is None:
        sim.CollectSimulationData()
    sim.Train(load_from_checkpoint=p.sim_checkpoint, loggers=loggers)
    sim.model.freeze()
    sim.EvaluateCost()
    sim.FreeData()
    return sim
def InitializeModel(p,
model,
sim,
soft_assign_tau=0.0,
soft_assign_use_ema=False,
ema_source_tm1=None):
"""Initializes model weights.
Given model_(t-1), sim, ..., ema_source_tm1, initializes model_t as follows.
If soft_assign_use_ema is False:
model := soft_assign_tau*model + (1-soft_assign_tau)*sim.
In particular:
- soft_assign_tau = 0 means always reinitializes 'model' with 'sim'.
- soft_assign_tau = 1 means don't reinitialize 'model'; keep training it
across value iterations.
A value of 0.1 seems to perform well.
Otherwise, use an exponential moving average of "source networks":
source_t = soft_assign_tau * source_(t-1)
+ (1-soft_assign_tau) model_(t-1)
model_t := source_t
In particular:
- soft_assign_tau = 0 means don't reinitialize 'model'; keep training it
across value iterations.
- soft_assign_tau = 1 means always reinitializes 'model' with 'sim'.
A value of 0.05 seems to perform well.
For both schemes, before training 'model' for the very first time it is
always initialized with the simulation model 'sim'.
Args:
p: params.
model: current iteration's value model.
sim: the trained-in-sim model.
soft_assign_tau: if positive, soft initializes 'model' using the formula
described above.
soft_assign_use_ema: whether to use an exponential moving average of
"source networks".
ema_source_tm1: the EMA of source networks at iteration t-1.
"""
def Rename(state_dict):
new_state_dict = collections.OrderedDict()
for key, value in state_dict.items():
new_key = key
if key.startswith('tree_conv.'):
new_key = key.replace('tree_conv.', '')
new_state_dict[new_key] = value
return new_state_dict
sim_weights = sim.model.state_dict()
sim_weights_renamed = copy.deepcopy(Rename(sim_weights))
model_weights = model.state_dict()
assert model_weights.keys() == sim_weights_renamed.keys()
tau = soft_assign_tau
if tau:
if not soft_assign_use_ema:
print('Assigning real model := {}*SIM + {}*previous real model'.
format(1 - tau, tau))
for key, param in model_weights.items():
param.requires_grad = False
param = param * tau + sim_weights_renamed[key] * (1.0 - tau)
param.requires_grad = True
else:
# Use an exponential moving average of source networks.
if ema_source_tm1 is None:
ema_source_tm1 = sim_weights_renamed
assert isinstance(ema_source_tm1,
collections.OrderedDict), ema_source_tm1
assert ema_source_tm1.keys() == model_weights.keys()
# Calculates source_t for | |
Attributes=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type LoadBalancerArn: string
:param LoadBalancerArn: [REQUIRED]
The Amazon Resource Name (ARN) of the load balancer.
:type Attributes: list
:param Attributes: [REQUIRED]
The load balancer attributes.
(dict) --Information about a load balancer attribute.
Key (string) --The name of the attribute.
access_logs.s3.enabled - Indicates whether access logs stored in Amazon S3 are enabled. The value is true or false .
access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs in Amazon S3 are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.
access_logs.s3.prefix - The prefix for the location in the S3 bucket. If you don't specify a prefix, the access logs are stored in the root of the bucket.
deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false .
idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-3600. The default is 60 seconds.
Value (string) --The value of the attribute.
:rtype: dict
:return: {
'Attributes': [
{
'Key': 'string',
'Value': 'string'
},
]
}
:returns:
access_logs.s3.enabled - Indicates whether access logs stored in Amazon S3 are enabled. The value is true or false .
access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs in Amazon S3 are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.
access_logs.s3.prefix - The prefix for the location in the S3 bucket. If you don't specify a prefix, the access logs are stored in the root of the bucket.
deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false .
idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-3600. The default is 60 seconds.
"""
pass
def modify_rule(RuleArn=None, Conditions=None, Actions=None):
    """
    Modifies the specified rule.
    Any existing properties that you do not modify retain their current values.
    To modify the default action, use ModifyListener .
    See also: AWS API Documentation
    Examples
    This example modifies the condition for the specified rule.
    Expected Output:
    :example: response = client.modify_rule(
        RuleArn='string',
        Conditions=[
            {
                'Field': 'string',
                'Values': [
                    'string',
                ]
            },
        ],
        Actions=[
            {
                'Type': 'forward',
                'TargetGroupArn': 'string'
            },
        ]
    )
    :type RuleArn: string
    :param RuleArn: [REQUIRED]
    The Amazon Resource Name (ARN) of the rule.
    :type Conditions: list
    :param Conditions: The conditions.
    (dict) --Information about a condition for a rule.
    Field (string) --The name of the field. The possible values are host-header and path-pattern .
    Values (list) --The condition value.
    If the field name is host-header , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.
    A-Z, a-z, 0-9
    - .
    * (matches 0 or more characters)
    ? (matches exactly 1 character)
    If the field name is path-pattern , you can specify a single path pattern (for example, /img/*). A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.
    A-Z, a-z, 0-9
    _ - . $ / ~ ' ' @ : +
    & (using &amp;)
    * (matches 0 or more characters)
    ? (matches exactly 1 character)
    (string) --
    :type Actions: list
    :param Actions: The actions.
    (dict) --Information about an action.
    Type (string) -- [REQUIRED]The type of action.
    TargetGroupArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the target group.
    :rtype: dict
    :return: {
        'Rules': [
            {
                'RuleArn': 'string',
                'Priority': 'string',
                'Conditions': [
                    {
                        'Field': 'string',
                        'Values': [
                            'string',
                        ]
                    },
                ],
                'Actions': [
                    {
                        'Type': 'forward',
                        'TargetGroupArn': 'string'
                    },
                ],
                'IsDefault': True|False
            },
        ]
    }
    :returns:
    A-Z, a-z, 0-9
    - .
    * (matches 0 or more characters)
    ? (matches exactly 1 character)
    """
    # Auto-generated documentation stub; no implementation.
    pass
def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
    """
    Modifies the health checks used when evaluating the health state of the targets in the specified target group.
    To monitor the health of the targets, use DescribeTargetHealth .
    See also: AWS API Documentation
    Examples
    This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
    Expected Output:
    :example: response = client.modify_target_group(
        TargetGroupArn='string',
        HealthCheckProtocol='HTTP'|'HTTPS',
        HealthCheckPort='string',
        HealthCheckPath='string',
        HealthCheckIntervalSeconds=123,
        HealthCheckTimeoutSeconds=123,
        HealthyThresholdCount=123,
        UnhealthyThresholdCount=123,
        Matcher={
            'HttpCode': 'string'
        }
    )
    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED]
    The Amazon Resource Name (ARN) of the target group.
    :type HealthCheckProtocol: string
    :param HealthCheckProtocol: The protocol to use to connect with the target.
    :type HealthCheckPort: string
    :param HealthCheckPort: The port to use to connect with the target.
    :type HealthCheckPath: string
    :param HealthCheckPath: The ping path that is the destination for the health check request.
    :type HealthCheckIntervalSeconds: integer
    :param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
    :type HealthCheckTimeoutSeconds: integer
    :param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
    :type HealthyThresholdCount: integer
    :param HealthyThresholdCount: The number of consecutive health check successes required before considering an unhealthy target healthy.
    :type UnhealthyThresholdCount: integer
    :param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
    :type Matcher: dict
    :param Matcher: The HTTP codes to use when checking for a successful response from a target.
    HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
    :rtype: dict
    :return: {
        'TargetGroups': [
            {
                'TargetGroupArn': 'string',
                'TargetGroupName': 'string',
                'Protocol': 'HTTP'|'HTTPS',
                'Port': 123,
                'VpcId': 'string',
                'HealthCheckProtocol': 'HTTP'|'HTTPS',
                'HealthCheckPort': 'string',
                'HealthCheckIntervalSeconds': 123,
                'HealthCheckTimeoutSeconds': 123,
                'HealthyThresholdCount': 123,
                'UnhealthyThresholdCount': 123,
                'HealthCheckPath': 'string',
                'Matcher': {
                    'HttpCode': 'string'
                },
                'LoadBalancerArns': [
                    'string',
                ]
            },
        ]
    }
    :returns:
    (string) --
    """
    # Auto-generated documentation stub; no implementation.
    pass
def modify_target_group_attributes(TargetGroupArn=None, Attributes=None):
"""
Modifies the specified attributes of the specified target group.
See also: AWS API Documentation
Examples
This example sets the deregistration delay timeout to the specified value for the specified target group.
Expected Output:
:example: response = client.modify_target_group_attributes(
TargetGroupArn='string',
Attributes=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type Attributes: list
:param Attributes: [REQUIRED]
The attributes.
(dict) --Information about a target group attribute.
Key (string) --The name of the attribute.
deregistration_delay.timeout_seconds - The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused . The range is 0-3600 seconds. The default value is 300 seconds.
stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false .
stickiness.type - The type of sticky sessions. The possible value is lb_cookie .
stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
Value (string) --The value of the attribute.
:rtype: dict
:return: {
'Attributes': [
{
'Key': 'string',
'Value': 'string'
},
]
}
:returns:
deregistration_delay.timeout_seconds - The amount time for Elastic Load Balancing | |
<gh_stars>1-10
from .line2d import Line2D
from .point2d import Point2D
from .vector2d import Vector2D
from numpy.matlib import zeros
from numpy.linalg import solve
from matplotlib.pyplot import figure
from py2md.classes import MDTable
class CubicSpline2D(object):
    u"""This class stores a 2D parametric cubic spline."""
    pnts = None  # control points (objects exposing .x and .y)
    npnts = None  # number of control points
    clsd = False  # True when the spline is closed (periodic)
    pnls = None  # SplineLine2D panels between consecutive points
    npnls = None  # number of panels
    d2r = None  # Vector2D second derivatives at each point
    dr = None  # Vector2D first derivatives at each point
    tanA = None  # optional start tangent for an open spline
    tanB = None  # optional end tangent for an open spline
    R = None  # radius of curvature at each point (inf on straight sections)
    def __init__(self, pnts, clsd=False, tanA=None, tanB=None):
        u"""This function initialises the object.

        pnts: sequence of points (objects exposing .x and .y).
        clsd: True to treat the spline as closed (periodic).
        tanA, tanB: optional end tangents for an open spline.
        """
        self.pnts = pnts
        self.clsd = clsd
        self.tanA = tanA
        self.tanB = tanB
        self.update()
    def update(self):
        u"""This function calculates the other parameters of the object."""
        self.npnts = len(self.pnts)
        # Panel index pairs: a closed spline has a wrap-around panel from the
        # last point back to the first, so npnls == npnts; open splines have
        # one fewer panel than points.
        if self.clsd:
            indas = [i for i in range(self.npnts)]
            indbs = [i+1 for i in range(self.npnts)]
            indbs[-1] = 0
            self.npnls = self.npnts
        else:
            indas = [i for i in range(self.npnts-1)]
            indbs = [i+1 for i in range(self.npnts-1)]
            self.npnls = self.npnts-1
        self.pnls = []
        for i in range(self.npnls):
            inda = indas[i]
            indb = indbs[i]
            self.pnls.append(SplineLine2D(self.pnts[inda], self.pnts[indb]))
        # Second derivatives first (they define the cubic), then gradients
        # and radii of curvature derived from them.
        if self.clsd:
            self.d2r = self.calc_d2r_closed()
            self.dr = self.calc_dr_closed()
        else:
            self.d2r = self.calc_d2r_open(tanA=self.tanA, tanB=self.tanB)
            self.dr = self.calc_dr_open()
        self.R = self.calc_R()
        # Hand each panel its end-point second derivatives for interpolation.
        for i, pnl in enumerate(self.pnls):
            ia, ib = i, i+1
            if ib == self.npnts and self.clsd:
                ib = 0
            pnl.set_d2r(self.d2r[ia], self.d2r[ib])
    def calc_d2r_open(self, tanA=None, tanB=None):
        u"""This function calculates the curvature of an open ended spline.

        Sets up the tridiagonal continuity equations for the second
        derivatives of a cubic spline and solves them with a forward
        elimination / back substitution (Thomas algorithm) sweep.  When a
        tangent is not supplied at an end, the corresponding row stays at
        its initialised identity form (b=1, r=0), i.e. a natural boundary
        with zero second derivative.
        """
        # Per-panel chord direction (panel vector normalised by length).
        pnl_dx = [pnl.vec.x/pnl.length for pnl in self.pnls]
        pnl_dy = [pnl.vec.y/pnl.length for pnl in self.pnls]
        # Right-hand side: change of direction across each interior point.
        del_dx = [0.0]*self.npnts
        del_dy = [0.0]*self.npnts
        if tanA != None:
            dxA = tanA.x
            dyA = tanA.y
            del_dx[0] = pnl_dx[0]-dxA
            del_dy[0] = pnl_dy[0]-dyA
        for i in range(1, self.npnts-1):
            del_dx[i] = pnl_dx[i]-pnl_dx[i-1]
            del_dy[i] = pnl_dy[i]-pnl_dy[i-1]
        if tanB != None:
            dxB = tanB.x
            dyB = tanB.y
            del_dx[-1] = dxB-pnl_dx[-1]
            del_dy[-1] = dyB-pnl_dy[-1]
        # Tridiagonal coefficients: a = sub-, b = main, c = super-diagonal.
        a = [0.0]*self.npnts
        b = [1.0]*self.npnts
        c = [0.0]*self.npnts
        rx = [0.0]*self.npnts
        ry = [0.0]*self.npnts
        if tanA != None:
            sB = self.pnls[0].length
            b[0] = sB/3
            c[0] = sB/6
            rx[0] = del_dx[0]
            ry[0] = del_dy[0]
        for i in range(1, self.npnts-1):
            sA = self.pnls[i-1].length
            sB = self.pnls[i].length
            a[i] = sA/6
            b[i] = (sA+sB)/3
            c[i] = sB/6
            rx[i] = del_dx[i]
            ry[i] = del_dy[i]
        if tanB != None:
            sA = self.pnls[-1].length
            a[-1] = sA/6
            b[-1] = sA/3
            rx[-1] = del_dx[-1]
            ry[-1] = del_dy[-1]
        # Thomas algorithm: forward elimination...
        Γ = [0.]*self.npnts
        d2x = [0.0]*self.npnts
        d2y = [0.0]*self.npnts
        β = b[0]
        d2x[0] = rx[0]/β
        d2y[0] = ry[0]/β
        for i in range(1, self.npnts):
            Γ[i] = c[i-1]/β
            β = b[i]-a[i]*Γ[i]
            d2x[i] = (rx[i]-a[i]*d2x[i-1])/β
            d2y[i] = (ry[i]-a[i]*d2y[i-1])/β
        # ...then back substitution.
        for i in range(self.npnts-2, -1, -1):
            d2x[i] -= Γ[i+1]*d2x[i+1]
            d2y[i] -= Γ[i+1]*d2y[i+1]
        d2r = [Vector2D(d2x[i], d2y[i]) for i in range(self.npnts)]
        return d2r
    def calc_d2r_closed(self):
        u"""This function calculates the curvature of a closed spline.

        The periodic continuity equations form a cyclic tridiagonal system
        (corner entries at [0, n-1] and [n-1, 0]), solved here with a dense
        numpy solve for both coordinates at once.
        """
        n = self.npnts
        # inda/indb: indices of the panels before/after each point, with
        # wrap-around at point 0.
        inda = [i-1 for i in range(n)]
        indb = [i for i in range(n)]
        inda[0] = n-1
        pnl_dx = [pnl.vec.x/pnl.length for pnl in self.pnls]
        pnl_dy = [pnl.vec.y/pnl.length for pnl in self.pnls]
        # Right-hand side: change of chord direction across each point.
        del_dx = [0.]*n
        del_dy = [0.]*n
        for i in range(n):
            del_dx[i] = pnl_dx[indb[i]]-pnl_dx[inda[i]]
            del_dy[i] = pnl_dy[indb[i]]-pnl_dy[inda[i]]
        A = zeros((n, n))
        B = zeros((n, 2))
        for i in range(n):
            sA = self.pnls[inda[i]].length
            sB = self.pnls[indb[i]].length
            # Sub-diagonal, wrapping to the top-right corner on row 0.
            if i-1 < 0:
                A[i, n-1] = sA/6
            else:
                A[i, i-1] = sA/6
            A[i, i] = (sA+sB)/3
            # Super-diagonal, wrapping to the bottom-left corner on row n-1.
            if i+1 > n-1:
                A[i, 0] = sB/6
            else:
                A[i,i+1] = sB/6
            B[i, 0] = del_dx[i]
            B[i, 1] = del_dy[i]
        X = solve(A, B)
        d2x = [X[i, 0] for i in range(n)]
        d2y = [X[i, 1] for i in range(n)]
        d2r = [Vector2D(d2x[i], d2y[i]) for i in range(self.npnts)]
        return d2r
def calc_dr_open(self):
u"""This function calculates the gradient of an open ended spline."""
dx = []
dy = []
for i in range(self.npnls):
xA = self.pnts[i].x
xB = self.pnts[i+1].x
d2xA = self.d2r[i].x
d2xB = self.d2r[i+1].x
yA = self.pnts[i].y
yB = self.pnts[i+1].y
d2yA = self.d2r[i].y
d2yB = self.d2r[i+1].y
sP = self.pnls[i].length
dxA = (xB-xA)/sP-sP/3*d2xA-sP/6*d2xB
dyA = (yB-yA)/sP-sP/3*d2yA-sP/6*d2yB
dx.append(dxA)
dy.append(dyA)
dxB = (xB-xA)/sP+sP/6*d2xA+sP/3*d2xB
dyB = (yB-yA)/sP+sP/6*d2yA+sP/3*d2yB
dx.append(dxB)
dy.append(dyB)
dr = [Vector2D(dx[i], dy[i]) for i in range(self.npnts)]
return dr
    def calc_dr_closed(self):
        u"""This function calculates the gradient of a closed spline.

        For a closed spline npnls == npnts, so taking the gradient at the
        start of each panel yields exactly one Vector2D per point.
        """
        n = self.npnts
        # Panel end-point indices, wrapping the last panel back to point 0.
        inda = [i for i in range(n)]
        indb = [i+1 for i in range(n)]
        indb[-1] = 0
        dx = []
        dy = []
        for i in range(self.npnls):
            ia = inda[i]
            ib = indb[i]
            xA = self.pnts[ia].x
            xB = self.pnts[ib].x
            d2xA = self.d2r[ia].x
            d2xB = self.d2r[ib].x
            yA = self.pnts[ia].y
            yB = self.pnts[ib].y
            d2yA = self.d2r[ia].y
            d2yB = self.d2r[ib].y
            sP = self.pnls[i].length
            # Cubic-spline gradient at the start of panel i.
            dxA = (xB-xA)/sP-sP/3*d2xA-sP/6*d2xB
            dyA = (yB-yA)/sP-sP/3*d2yA-sP/6*d2yB
            dx.append(dxA)
            dy.append(dyA)
        dr = [Vector2D(dx[i], dy[i]) for i in range(self.npnts)]
        return dr
def calc_R(self):
u"""This function calculates the radius of curvature of the spline."""
R = []
for i in range(self.npnts):
dx = self.dr[i].x
dy = self.dr[i].y
d2x = self.d2r[i].x
d2y = self.d2r[i].y
k = (dx*d2y-dy*d2x)/(dx**2+dy**2)**1.5
if k == 0.:
R.append(float('inf'))
else:
R.append(1/k)
return R
def spline_points(self, num: int):
u"""This function interpolates the spline with a number of points."""
x = []
y = []
for pnl in self.pnls:
for i in range(num):
s = float(i*pnl.length/num)
xi, yi = pnl.interpolate_point(s)
x.append(xi)
y.append(yi)
if self.clsd:
x.append(self.pnts[0].x)
y.append(self.pnts[0].y)
else:
x.append(self.pnts[-1].x)
y.append(self.pnts[-1].y)
return x, y
def spline_gradient(self, num: int):
u"""This function interpolates the gradient of the spline."""
dx = []
dy = []
for pnl in self.pnls:
for i in range(num):
s = float(i*pnl.length/num)
dxi, dyi = pnl.interpolate_gradient(s)
dx.append(dxi)
dy.append(dyi)
if self.clsd:
dx.append(self.dr[0].x)
dy.append(self.dr[0].y)
else:
dx.append(self.dr[-1].x)
dy.append(self.dr[-1].y)
return dx, dy
def spline_curvature(self, num: int):
u"""This function interpolates the curvature of the spline."""
d2x = []
d2y = []
for pnl in self.pnls:
for i in range(num):
s = float(i*pnl.length/num)
d2xi, d2yi = pnl.interpolate_curvature(s)
d2x.append(d2xi)
d2y.append(d2yi)
if self.clsd:
d2x.append(self.d2r[0].x)
d2y.append(self.d2r[0].y)
else:
d2x.append(self.d2r[-1].x)
d2y.append(self.d2r[-1].y)
return d2x, d2y
    def line_intersection(self, line, all_roots=False):
        """Find spline/line intersections panel by panel.

        line: object with a point (line.pnt) and unit direction (line.uvec).
        all_roots: when True, return every cubic root for every panel
            (normalised by panel length) without range filtering.

        Returns a dict mapping panel index -> list of normalised parameters.
        Relies on a module-level cubic_roots(a, b, c, d) helper defined
        elsewhere in this file.
        """
        edct = {}
        xP = line.pnt.x
        yP = line.pnt.y
        dxdl = line.uvec.x
        dydl = line.uvec.y
        for i in range(self.npnls):
            ia = i
            xA = self.pnts[ia].x
            yA = self.pnts[ia].y
            d2xA = self.d2r[ia].x
            d2yA = self.d2r[ia].y
            ib = i+1
            if ib == self.npnts:
                ib = 0
            xB = self.pnts[ib].x
            yB = self.pnts[ib].y
            d2xB = self.d2r[ib].x
            d2yB = self.d2r[ib].y
            sP = self.pnls[i].length
            # Coefficients of the cubic in the panel parameter s obtained by
            # substituting the panel's cubic expression into the line
            # equation (cross product of direction with point offset = 0).
            a = -d2xA*dydl/(6*sP) + d2xB*dydl/(6*sP) + d2yA*dxdl/(6*sP) - d2yB*dxdl/(6*sP)
            b = d2xA*dydl/2 - d2yA*dxdl/2
            c = -d2xA*dydl*sP/3 - d2xB*dydl*sP/6 + d2yA*dxdl*sP/3 + d2yB*dxdl*sP/6 + dxdl*yA/sP - dxdl*yB/sP - dydl*xA/sP + dydl*xB/sP
            d = -dxdl*yA + dxdl*yP + dydl*xA - dydl*xP
            s1, s2, s3 = cubic_roots(a, b, c, d)
            if all_roots:
                edct[i] = [s1/sP, s2/sP, s3/sP]
                continue
            # Keep only real roots that fall (approximately) inside the
            # panel.  NOTE(review): e1/e2 are normalised to [0, 1] yet are
            # compared against bounds that scale with sP, while e3 uses
            # fixed bounds [-1e-6, 1+1e-6]; the three checks look intended
            # to be identical -- verify which tolerance is correct.
            elst = []
            if isinstance(s1, float):
                e1 = s1/sP
                if e1 >= 0.0-sP/1000 and e1 <= sP+sP/1000:
                    elst.append(e1)
            if isinstance(s2, float):
                e2 = s2/sP
                if e2 >= 0.0-sP/1000 and e2 <= sP+sP/1000:
                    elst.append(e2)
            if isinstance(s3, float):
                e3 = s3/sP
                if e3 >= -0.000001 and e3 < 1.000001:
                    elst.append(e3)
            if len(elst) > 0:
                edct[i] = elst
        return edct
def line_intersection_points(self, line, all_roots=False):
edct = self.line_intersection(line, all_roots=all_roots)
print(f'edct = {edct}')
pnts = []
for i in edct:
ia = i
xA = self.pnts[ia].x
yA = self.pnts[ia].y
d2xA = self.d2r[ia].x
d2yA = self.d2r[ia].y
ib = i+1
if ib == self.npnts:
ib = 0
xB = self.pnts[ib].x
yB = self.pnts[ib].y
d2xB = self.d2r[ib].x
d2yB = self.d2r[ib].y
sP = self.pnls[i].length
elst = edct[i]
for e in elst:
s = e*sP
if isinstance(s, float):
if s >= 0.0 and s < sP:
A = (sP-s)/sP
B = s/sP
C = sP**2/6*(A**3-A)
D = sP**2/6*(B**3-B)
x = A*xA+B*xB+C*d2xA+D*d2xB
y = A*yA+B*yB+C*d2yA+D*d2yB
pnts.append(Point2D(x, y))
return pnts
def scatter(self, ax=None, label=False):
u"""This function plots the points of the spline."""
if ax == None:
fig = figure()
ax = fig.gca()
ax.set_aspect('equal')
ax.grid(True)
x = []
y = []
for i in range(self.npnts):
x.append(self.pnts[i].x)
y.append(self.pnts[i].y)
ax.scatter(x,y)
if label:
for i in range(self.npnts):
ax.text(x[i], y[i], i)
return ax
def plot_spline(self, num=1, ax=None, color='blue'):
u"""This function plots the spline using the interpolated points."""
if ax == None:
fig = figure()
ax = fig.gca()
ax.set_aspect('equal')
ax.grid(True)
x, y = self.spline_points(num)
ax.plot(x, y, color=color)
return ax
def arc_length(self, num=1):
u"""This function calculates the arc length of the spline."""
s = []
sc = 0.0
for i | |
# <gh_stars>10-100
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.nn as nn
import mindspore.context as context
from mindspore.ops import composite as C
class NetTensorDot(nn.Cell):
    """Cell wrapping ``C.tensor_dot`` with a fixed axes specification."""
    def __init__(self, axes):
        super(NetTensorDot, self).__init__()
        # ``axes`` is forwarded verbatim to tensor_dot; the tests below use
        # an int, a pair of lists, and a pair of tuples (np.tensordot style).
        self.axes = axes
    def construct(self, x, y):
        # Contract x with y along the configured axes.
        return C.tensor_dot(x, y, self.axes)
class GradNetwork(nn.Cell):
    """Wraps a network to compute gradients w.r.t. both inputs, given an
    output sensitivity (upstream gradient)."""
    def __init__(self, network):
        super(GradNetwork, self).__init__()
        # get_all=True: differentiate w.r.t. every input;
        # sens_param=True: caller supplies the output sensitivity tensor.
        self.grad = C.GradOperation(get_all=True, sens_param=True)
        self.network = network
    def construct(self, input_data_a, input_data_b, sens):
        gout = self.grad(self.network)(input_data_a, input_data_b, sens)
        return gout
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_dot_fp32():
    """Compare C.tensor_dot with np.tensordot in float32 for 4D, 1D, 2D
    and 3D operands."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    np.random.seed(12876)
    shape_x1 = (1, 3, 9, 7)
    shape_x2 = (9, 7, 3, 1)
    axes = ((1, 3), (2, 1))
    x1 = np.random.random(shape_x1).astype(np.float32)
    x2 = np.random.random(shape_x2).astype(np.float32)
    x1_tensor = Tensor(x1, dtype=mindspore.float32)
    x2_tensor = Tensor(x2, dtype=mindspore.float32)
    network = NetTensorDot(axes)
    ms_result_np = network(x1_tensor, x2_tensor).asnumpy()
    np_result = np.tensordot(x1, x2, axes)
    np.testing.assert_array_almost_equal(ms_result_np, np_result)
    # 1D
    shape_x1 = (200)
    shape_x2 = (200)
    axes = 1
    x1 = np.random.random(shape_x1).astype(np.float32)
    x2 = np.random.random(shape_x2).astype(np.float32)
    x1_tensor = Tensor(x1, dtype=mindspore.float32)
    x2_tensor = Tensor(x2, dtype=mindspore.float32)
    network = NetTensorDot(axes)
    ms_result_np = network(x1_tensor, x2_tensor).asnumpy()
    np_result = np.tensordot(x1, x2, axes)
    # BUG FIX: the original called np.allclose() and discarded the boolean
    # result, so a mismatch could never fail the test.  Assert it instead.
    assert np.allclose(ms_result_np, np_result)
    # 2D
    shape_x1 = (100, 300)
    shape_x2 = (300, 700)
    axes = ([1], [0])
    x1 = np.random.random(shape_x1).astype(np.float32)
    x2 = np.random.random(shape_x2).astype(np.float32)
    x1_tensor = Tensor(x1, dtype=mindspore.float32)
    x2_tensor = Tensor(x2, dtype=mindspore.float32)
    network = NetTensorDot(axes)
    ms_result_np = network(x1_tensor, x2_tensor).asnumpy()
    np_result = np.tensordot(x1, x2, axes)
    assert np.allclose(ms_result_np, np_result)  # BUG FIX: was unasserted
    # 3D
    shape_x1 = (110, 30, 900)
    shape_x2 = (900, 70, 30)
    axes = ((1, 2), (2, 0))
    x1 = np.random.random(shape_x1).astype(np.float32)
    x2 = np.random.random(shape_x2).astype(np.float32)
    x1_tensor = Tensor(x1, dtype=mindspore.float32)
    x2_tensor = Tensor(x2, dtype=mindspore.float32)
    network = NetTensorDot(axes)
    ms_result_np = network(x1_tensor, x2_tensor).asnumpy()
    np_result = np.tensordot(x1, x2, axes)
    assert np.allclose(ms_result_np, np_result)  # BUG FIX: was unasserted
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_dot_fp16():
    """Compare C.tensor_dot with np.tensordot in float16 for 4D, 1D, 2D
    and 3D operands."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    np.random.seed(41329)
    def _run_case(shape_a, shape_b, axes):
        # Draw both operands (in the same order as before, so the seeded
        # random stream is unchanged), run the network, and compare.
        a = np.random.random(shape_a).astype(np.float16)
        b = np.random.random(shape_b).astype(np.float16)
        net = NetTensorDot(axes)
        actual = net(Tensor(a, dtype=mindspore.float16),
                     Tensor(b, dtype=mindspore.float16)).asnumpy()
        np.testing.assert_array_almost_equal(actual, np.tensordot(a, b, axes))
    # 4D, axes as an int (contract last/first N dims)
    _run_case((1, 3, 4, 1), (4, 1, 7, 5), 2)
    # 1D dot product
    _run_case((300), (300), 1)
    # 2D matrix multiply
    _run_case((100, 300), (300, 100), ([1], [0]))
    # 3D
    _run_case((60, 30, 450), (450, 90, 30), ((1, 2), (2, 0)))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_dot_outer():
    """Outer product (axes=0): no dimensions need to match between operands."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    np.random.seed(2746)
    axes = 0  # outer product does not require multiplicable dims
    lhs = np.random.random((1, 2, 3)).astype(np.float32)  # deliberately incompatible shapes
    rhs = np.random.random((4, 5, 6)).astype(np.float32)
    net = NetTensorDot(axes)
    actual = net(Tensor(lhs, dtype=mindspore.float32),
                 Tensor(rhs, dtype=mindspore.float32)).asnumpy()
    np.testing.assert_array_almost_equal(actual, np.tensordot(lhs, rhs, axes))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_dot_backprop():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
# TEST 1
shape_x1 = (2, 4, 2)
shape_x2 = (3, 2, 3)
axes = ((0,), (1,)) # select first N from
network = NetTensorDot(axes)
np.random.seed(115)
x1 = np.random.random(shape_x1).astype(np.float16)
np.random.seed(1467)
x2 = np.random.random(shape_x2).astype(np.float16)
x1_tensor = Tensor(x1, dtype=mindspore.float16)
x2_tensor = Tensor(x2, dtype=mindspore.float16)
np.random.seed(157)
grad = np.random.random((4, 2, 3, 3))
grad_tensor = Tensor(grad, dtype=mindspore.float16)
grad_network = GradNetwork(network)
dx1, dx2 = grad_network(x1_tensor, x2_tensor, grad_tensor)
dx1, dx2 = dx1.asnumpy(), dx2.asnumpy()
# precomputed
expect_dx1 = np.array([[[2.0293, 2.4473],
[2.9727, 1.4873],
[1.7910, 3.4727],
[2.4160, 1.7227]],
[[2.5547, 2.5039],
[3.4062, 2.3320],
[2.6270, 3.1543],
[2.1406, 1.7666]]])
expect_dx2 = np.array([[[2.1523, 2.9199, 0.8350],
[2.0254, 2.7734, 1.3213]],
[[2.6836, 2.4707, 1.0156],
[2.9746, 3.0254, 1.9199]],
[[1.8545, 1.7803, 1.3457],
[2.2676, 2.1797, 1.2764]]])
np.allclose(dx1, expect_dx1)
np.allclose(dx2, expect_dx2)
# TEST 2
shape_x1 = (10, 35)
shape_x2 = (20, 10)
axes = ((0,), (1,)) # select first N from
network = NetTensorDot(axes)
np.random.seed(215)
x1 = np.random.random(shape_x1).astype(np.float16)
np.random.seed(2467)
x2 = np.random.random(shape_x2).astype(np.float16)
x1_tensor = Tensor(x1, dtype=mindspore.float16)
x2_tensor = Tensor(x2, dtype=mindspore.float16)
np.random.seed(257)
grad = np.random.random((35, 20))
grad_tensor = Tensor(grad, dtype=mindspore.float16)
grad_network = GradNetwork(network)
dx1, dx2 = grad_network(x1_tensor, x2_tensor, grad_tensor)
dx1, dx2 = dx1.asnumpy(), dx2.asnumpy()
# precomputed
expect_dx1 = np.array([[5.9727, 4.6484, 5.1836, 4.3906, 5.1641, 5.1406, 5.1211, 6.5352, 4.9922,
4.4297, 4.4648, 6.5469, 6.2305, 4.8789, 6.8320, 5.3906, 4.7383, 6.0352,
4.7383, 4.4844, 5.3711, 6.2617, 4.6484, 5.8672, 4.7500, 6.0234, 3.6387,
5.3789, 5.9727, 5.7227, 6.0234, 4.9609, 5.0117, 5.4141, 5.1406],
[5.2305, 4.0078, 4.6328, 3.9238, 4.2773, 4.2539, 4.6797, 5.1289, 3.7910,
3.8887, 3.2930, 5.5898, 5.4219, 3.6211, 5.5234, 3.5391, 4.8516, 4.7539,
4.2500, 2.9785, 4.8867, 5.4648, 5.0195, 6.0195, 4.7109, 3.9727, 3.4922,
4.1484, 4.7969, 5.3555, 4.9414, 5.2969, 3.1992, 5.2031, 4.4648],
[5.2266, 5.2617, 5.3750, 4.7930, 4.9062, 5.4102, 4.9336, 6.9414, 4.4961,
4.4023, 4.7344, 5.8125, 4.9180, 4.7891, 5.9805, 5.2383, 4.6445, 6.1172,
4.8477, 3.7578, 4.3047, 5.7969, 4.5859, 6.0273, 4.3438, 4.7305, 4.0938,
4.8398, 5.8320, 5.3438, 5.3281, 4.8320, 4.0938, 4.9375, 5.3281],
[7.4297, 5.1484, 6.3477, 5.4844, 5.7852, 6.3906, 5.5234, 7.2383, 5.2969,
4.9844, 4.5625, 7.3047, 7.3789, 6.4453, 8.2266, 6.6172, 5.5547, 7.0234,
4.8594, 4.9531, 6.0469, 6.9258, 6.1055, 6.7539, 6.6953, 6.0430, 4.5117,
5.7344, 7.4297, 6.4219, 6.8125, 6.4141, 5.2773, 6.8828, 6.0430],
[5.7969, 4.7109, 5.8281, 4.5703, 5.5078, 6.4219, 4.8359, 7.1484, 4.2617,
4.8477, 4.2539, 5.6016, 6.4414, 5.7305, 6.4766, 5.4648, 4.5859, 6.5547,
5.5156, 3.3848, 5.1523, 5.5352, 4.9531, 6.5938, 5.2969, 4.6055, 5.2109,
4.4961, 5.8984, 5.4531, 5.8086, 5.7930, 5.0742, 5.4102, 4.9453],
[7.2188, 5.8789, 6.9453, 6.0039, 6.7188, 7.3359, 6.7695, 8.6172, 5.6680,
6.4219, 6.1836, 7.7695, 7.5391, 6.5312, 8.2812, 7.5352, 5.8867, 7.7070,
6.0039, 5.1172, 6.4844, 7.4297, 5.9219, 7.5078, 6.3125, 6.9805, 5.3750,
5.9805, 7.2148, 7.6484, 7.8828, 6.7695, 5.7109, 6.8828, 6.9023],
[5.7656, 4.3633, 4.5039, 4.4375, 4.3867, 5.4336, 4.3672, 5.5469, 3.5742,
4.0508, 3.7402, 5.9141, 5.7734, 4.5781, 5.6719, 4.5625, 4.5391, 5.1719,
4.3945, 3.4844, 4.9297, 5.7227, 4.8203, 5.8125, 4.8633, 4.3125, 3.6641,
4.3789, 5.6133, 5.1758, 4.9141, 5.8008, 4.0391, 5.8984, 4.3594],
[4.7734, 3.4238, 4.3477, 3.6270, 4.4883, 5.2031, 3.9023, 5.0078, 2.9355,
3.8477, 3.4648, 5.1445, 4.8398, 4.4297, 5.1641, 4.2422, 4.2695, 4.6992,
4.5039, 2.5176, 4.2500, 5.6680, 4.1875, 5.4141, 3.6094, 3.1758, 3.8398,
3.9180, 5.3320, 4.6523, 3.9531, 4.8281, 3.9863, 4.8867, 4.3711],
[6.7578, 5.3164, 6.0000, 4.4531, 5.8789, 6.3750, 5.1094, 7.0391, 4.5781,
4.8633, 4.5156, 6.6641, 6.3594, 5.5664, 6.9453, 5.5820, 5.1992, 6.9570,
5.3242, 3.8574, 5.1445, 6.0547, 5.0273, 6.9180, 5.1914, 4.6914, 4.6445,
5.1289, 5.8711, 6.2070, 6.1953, 5.7695, 4.7617, 5.5898, 4.9492],
[4.9180, 4.0117, 4.1211, 3.4629, 3.6445, 4.6602, 3.7031, 4.9062, 4.1133,
3.0020, 3.2246, 4.6562, 4.4727, 3.3828, 5.2695, 4.0078, 3.2559, 4.9688,
3.5742, 3.1133, 3.8223, 4.7578, 3.7949, 4.8438, 4.0664, 4.4336, 3.0957,
4.4375, 4.2969, 4.1758, 4.5234, 4.2930, 3.9434, 4.8281, 3.0703]])
expect_dx2 = np.array([[6.7930, 7.0000, 8.8203, 9.7031, 8.1250,
6.7422, 8.4844, 8.7031, 7.2891, 10.1484],
[8.5781, 8.1641, 9.9609, 9.2344, 9.3281,
8.1484, 9.8984, 9.0391, 7.9805, 11.0469],
[8.1016, 7.0781, 8.9688, 10.0938, 9.6641,
7.1523, 8.2969, 8.8594, 8.3047, 10.2578],
[7.0938, 7.3477, 9.3594, 8.2422, 7.9141,
6.5156, 8.2812, 8.2266, 6.9766, 8.5703],
[9.2891, 9.2500, 11.6875, 9.5234, 10.1172,
8.8125, 9.5781, 9.5547, 8.9688, 11.2266],
[9.3594, 7.7539, 9.2500, 9.2500, 8.1094,
8.0859, 8.7344, 8.2031, 8.5859, 10.3203],
[8.7344, 7.7227, 10.2578, 10.1641, 9.3984,
8.1719, 8.0156, 8.6953, 8.6797, 10.6875],
[8.8750, 7.9922, 10.2422, 10.3984, 9.5234,
8.5156, 8.7266, 8.8125, 8.2578, 10.2578],
[9.5703, 8.9844, 10.0547, 10.3047, 10.4062,
8.2422, 10.7031, 9.7891, 9.2969, 11.0078],
[9.2891, 9.5391, 10.5938, 10.5078, 9.8203,
8.5156, 9.0859, 9.0703, 8.7812, 10.8750],
[8.6094, 8.2734, 10.2734, 9.7891, 9.4531,
7.5820, 8.4609, 8.6094, 7.7578, 10.3438],
[8.2891, 8.7578, 9.3906, 9.6016, 9.4375,
7.1016, 8.6875, 8.1875, 8.2188, 9.3672],
[7.2969, 6.6953, 9.3984, 8.2422, 8.3438,
7.5547, 7.6445, 7.5820, 7.5156, 9.0781],
[8.3906, 7.3516, 8.5938, 9.2422, 8.7734,
8.0781, 9.1250, 7.8359, 7.7891, 10.9375],
[9.9219, 8.8281, 9.4141, 10.2500, 9.8047,
8.5234, 8.5391, 8.4609, 8.5859, 11.2422],
[6.8984, 6.4570, 8.0000, 6.4688, 7.4609,
6.6016, 7.0352, 6.6797, 6.5586, 7.7070],
[8.0625, 7.4805, 8.7578, 8.3281, 8.2188,
7.4023, 8.5312, 7.5312, 7.1445, 10.3750],
[7.7773, 6.6484, 9.1094, 8.0078, 7.8281,
7.1016, 8.2422, 8.1562, 6.8828, 10.3281],
[8.3281, 8.3672, 9.7656, 10.4922, 8.2500,
7.5625, | |
"""
Weyl Groups
AUTHORS:
- <NAME> (2008): initial version
- <NAME> (2008): initial version
- <NAME> (2008): initial version
- <NAME> (2008): initial version
- <NAME> (2013): LibGAP-based matrix groups
EXAMPLES:
More examples on Weyl Groups should be added here...
The Cayley graph of the Weyl Group of type ['A', 3]::
sage: w = WeylGroup(['A',3])
sage: d = w.cayley_graph(); d
Digraph on 24 vertices
sage: d.show3d(color_by_label=True, edge_size=0.01, vertex_size=0.03)
The Cayley graph of the Weyl Group of type ['D', 4]::
sage: w = WeylGroup(['D',4])
sage: d = w.cayley_graph(); d
Digraph on 192 vertices
sage: d.show3d(color_by_label=True, edge_size=0.01, vertex_size=0.03) #long time (less than one minute)
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> <bump at match.stanford.edu>,
# <NAME> <<EMAIL>>
# <NAME> <anne at math.ucdavis.edu>
# <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.groups.matrix_gps.finitely_generated import FinitelyGeneratedMatrixGroup_gap
from sage.groups.matrix_gps.group_element import MatrixGroupElement_gap
from sage.groups.perm_gps.permgroup import PermutationGroup_generic
from sage.rings.all import ZZ, QQ
from sage.interfaces.gap import gap
from sage.misc.cachefunc import cached_method
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.root_system.cartan_matrix import CartanMatrix
from sage.combinat.root_system.reflection_group_element import RealReflectionGroupElement
from sage.matrix.constructor import matrix, diagonal_matrix
from sage.combinat.root_system.root_lattice_realizations import RootLatticeRealizations
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.richcmp import richcmp, richcmp_not_equal
from sage.categories.all import WeylGroups, FiniteWeylGroups, AffineWeylGroups
from sage.categories.permutation_groups import PermutationGroups
from sage.sets.family import Family
from sage.matrix.constructor import Matrix
def WeylGroup(x, prefix=None, implementation='matrix'):
    """
    Returns the Weyl group of the root system defined by the Cartan
    type (or matrix) ``ct``.
    INPUT:
    - ``x`` - a root system or a Cartan type (or matrix)
    OPTIONAL:
    - ``prefix`` -- changes the representation of elements from matrices
      to products of simple reflections
    - ``implementation`` -- one of the following:
      * ``'matrix'`` - as matrices acting on a root system
      * ``"permutation"`` - as a permutation group acting on the roots
    EXAMPLES:
    The following constructions yield the same result, namely
    a weight lattice and its corresponding Weyl group::
        sage: G = WeylGroup(['F',4])
        sage: L = G.domain()
    or alternatively and equivalently::
        sage: L = RootSystem(['F',4]).ambient_space()
        sage: G = L.weyl_group()
        sage: W = WeylGroup(L)
    Either produces a weight lattice, with access to its roots and
    weights.
    ::
        sage: G = WeylGroup(['F',4])
        sage: G.order()
        1152
        sage: [s1,s2,s3,s4] = G.simple_reflections()
        sage: w = s1*s2*s3*s4; w
        [ 1/2  1/2  1/2  1/2]
        [-1/2  1/2  1/2 -1/2]
        [ 1/2  1/2 -1/2 -1/2]
        [ 1/2 -1/2  1/2 -1/2]
        sage: type(w) == G.element_class
        True
        sage: w.order()
        12
        sage: w.length() # length function on Weyl group
        4
    The default representation of Weyl group elements is as matrices.
    If you prefer, you may specify a prefix, in which case the
    elements are represented as products of simple reflections.
    ::
        sage: W=WeylGroup("C3",prefix="s")
        sage: [s1,s2,s3]=W.simple_reflections() # lets Sage parse its own output
        sage: s2*s1*s2*s3
        s1*s2*s3*s1
        sage: s2*s1*s2*s3 == s1*s2*s3*s1
        True
        sage: (s2*s3)^2==(s3*s2)^2
        True
        sage: (s1*s2*s3*s1).matrix()
        [ 0  0 -1]
        [ 0  1  0]
        [ 1  0  0]
    ::
        sage: L = G.domain()
        sage: fw = L.fundamental_weights(); fw
        Finite family {1: (1, 1, 0, 0), 2: (2, 1, 1, 0), 3: (3/2, 1/2, 1/2, 1/2), 4: (1, 0, 0, 0)}
        sage: rho = sum(fw); rho
        (11/2, 5/2, 3/2, 1/2)
        sage: w.action(rho) # action of G on weight lattice
        (5, -1, 3, 2)
    We can also do the same for arbitrary Cartan matrices::
        sage: cm = CartanMatrix([[2,-5,0],[-2,2,-1],[0,-1,2]])
        sage: W = WeylGroup(cm)
        sage: W.gens()
        (
        [-1  5  0]  [ 1  0  0]  [ 1  0  0]
        [ 0  1  0]  [ 2 -1  1]  [ 0  1  0]
        [ 0  0  1], [ 0  0  1], [ 0  1 -1]
        )
        sage: s0,s1,s2 = W.gens()
        sage: s1*s2*s1
        [ 1  0  0]
        [ 2  0 -1]
        [ 2 -1  0]
        sage: s2*s1*s2
        [ 1  0  0]
        [ 2  0 -1]
        [ 2 -1  0]
        sage: s0*s1*s0*s2*s0
        [ 9  0 -5]
        [ 2  0 -1]
        [ 0  1 -1]
    Same Cartan matrix, but with a prefix to display using simple reflections::
        sage: W = WeylGroup(cm, prefix='s')
        sage: s0,s1,s2 = W.gens()
        sage: s0*s2*s1
        s2*s0*s1
        sage: (s1*s2)^3
        1
        sage: (s0*s1)^5
        s0*s1*s0*s1*s0*s1*s0*s1*s0*s1
        sage: s0*s1*s2*s1*s2
        s2*s0*s1
        sage: s0*s1*s2*s0*s2
        s0*s1*s0
    TESTS::
        sage: TestSuite(WeylGroup(["A",3])).run()
        sage: TestSuite(WeylGroup(["A",3,1])).run() # long time
        sage: W = WeylGroup(['A',3,1])
        sage: s = W.simple_reflections()
        sage: w = s[0]*s[1]*s[2]
        sage: w.reduced_word()
        [0, 1, 2]
        sage: w = s[0]*s[2]
        sage: w.reduced_word()
        [2, 0]
        sage: W = groups.misc.WeylGroup(['A',3,1])
    """
    if implementation == "permutation":
        return WeylGroup_permutation(x, prefix)
    elif implementation != "matrix":
        raise ValueError("invalid implementation")
    # A root lattice realization already carries the data the matrix
    # implementation acts on; use it directly.
    if x in RootLatticeRealizations:
        return WeylGroup_gens(x, prefix=prefix)
    try:
        ct = CartanType(x)
    except TypeError:
        ct = CartanMatrix(x)  # See if it is a Cartan matrix
    # Finite types act on the ambient space; infinite (e.g. affine) types
    # fall back to the root space.
    if ct.is_finite():
        return WeylGroup_gens(ct.root_system().ambient_space(), prefix=prefix)
    return WeylGroup_gens(ct.root_system().root_space(), prefix=prefix)
class WeylGroup_gens(UniqueRepresentation,
FinitelyGeneratedMatrixGroup_gap):
    @staticmethod
    def __classcall__(cls, domain, prefix=None):
        """Normalize the constructor arguments (``prefix`` defaults to
        ``None``) so :class:`UniqueRepresentation` caching treats equivalent
        calls identically."""
        return super(WeylGroup_gens, cls).__classcall__(cls, domain, prefix)
    def __init__(self, domain, prefix):
        """
        EXAMPLES::
            sage: G = WeylGroup(['B',3])
            sage: TestSuite(G).run()
            sage: cm = CartanMatrix([[2,-5,0],[-2,2,-1],[0,-1,2]])
            sage: W = WeylGroup(cm)
            sage: TestSuite(W).run() # long time
        """
        self._domain = domain
        # Pick the most specific category supported by the Cartan type.
        if self.cartan_type().is_affine():
            category = AffineWeylGroups()
        elif self.cartan_type().is_finite():
            category = FiniteWeylGroups()
        else:
            category = WeylGroups()
        if self.cartan_type().is_irreducible():
            category = category.Irreducible()
        self.n = domain.dimension() # Really needed?
        self._prefix = prefix
        # FinitelyGeneratedMatrixGroup_gap takes plain matrices as input
        gens_matrix = [self.morphism_matrix(self.domain().simple_reflection(i))
                       for i in self.index_set()]
        from sage.libs.all import libgap
        libgap_group = libgap.Group(gens_matrix)
        degree = ZZ(self.domain().dimension())
        ring = self.domain().base_ring()
        FinitelyGeneratedMatrixGroup_gap.__init__(
            self, degree, ring, libgap_group, category=category)
    @cached_method
    def cartan_type(self):
        """
        Returns the CartanType associated to self.
        EXAMPLES::
            sage: G = WeylGroup(['F',4])
            sage: G.cartan_type()
            ['F', 4]
        """
        # Delegate to the underlying root lattice realization.
        return self.domain().cartan_type()
    @cached_method
    def index_set(self):
        """
        Returns the index set of self.
        EXAMPLES::
            sage: G = WeylGroup(['F',4])
            sage: G.index_set()
            (1, 2, 3, 4)
            sage: G = WeylGroup(['A',3,1])
            sage: G.index_set()
            (0, 1, 2, 3)
        """
        # Delegate to the Cartan type (affine types include index 0).
        return self.cartan_type().index_set()
# Should be implemented in (morphisms of) modules with basis
    def morphism_matrix(self, f):
        """Return the matrix of the domain morphism ``f`` in the basis of
        ``self.domain()``, with the images of basis vectors as columns."""
        return matrix(self.domain().base_ring(), [f(b).to_vector()
            for b in self.domain().basis()]).transpose()
    def from_morphism(self, f):
        """Build the group element whose matrix is that of the domain
        morphism ``f``."""
        return self._element_constructor_(self.morphism_matrix(f))
    @cached_method
    def simple_reflections(self):
        """
        Returns the simple reflections of self, as a family.
        EXAMPLES:
        There are the simple reflections for the symmetric group::
            sage: W=WeylGroup(['A',2])
            sage: s = W.simple_reflections(); s
            Finite family {1: [0 1 0]
            [1 0 0]
            [0 0 1], 2: [1 0 0]
            [0 0 1]
            [0 1 0]}
        As a special feature, for finite irreducible root systems,
        s[0] gives the reflection along the highest root::
            sage: s[0]
            [0 0 1]
            [0 1 0]
            [1 0 0]
        We now look at some further examples::
            sage: W=WeylGroup(['A',2,1])
            sage: W.simple_reflections()
            Finite family {0: [-1  1  1]
            [ 0  1  0]
            [ 0  0  1], 1: [ 1  0  0]
            [ 1 -1  1]
            [ 0  0  1], 2: [ 1  0  0]
            [ 0  1  0]
            [ 1  1 -1]}
            sage: W = WeylGroup(['F',4])
            sage: [s1,s2,s3,s4] = W.simple_reflections()
            sage: w = s1*s2*s3*s4; w
            [ 1/2  1/2  1/2  1/2]
            [-1/2  1/2  1/2 -1/2]
            [ 1/2  1/2 -1/2 -1/2]
            [ 1/2 -1/2  1/2 -1/2]
            sage: s4^2 == W.one()
            True
            sage: type(w) == W.element_class
            True
        """
        # Each simple reflection of the domain is a module morphism; convert
        # each one into a matrix-group element of ``self``.
        return self.domain().simple_reflections().map(self.from_morphism)
    def reflections(self):
        """
        Return the reflections of ``self``.
        The reflections of a Coxeter group `W` are the conjugates of
        the simple reflections. They are in bijection with the positive
        roots, for given a positive root, we may have the reflection in
        the hyperplane orthogonal to it. This method returns a family
        indexed by the positive roots taking values in the reflections.
        This requires ``self`` to be a finite Weyl group.
        .. NOTE::
            Prior to :trac:`20027`, the reflections were the keys
            of the family and the values were the positive roots.
        EXAMPLES::
            sage: W = WeylGroup("B2", prefix="s")
            sage: refdict = W.reflections(); refdict
            Finite family {(1, -1): s1, (1, 1): s2*s1*s2, (1, 0): s1*s2*s1, (0, 1): s2}
            sage: [r+refdict[r].action(r) for r in refdict.keys()]
            [(0, 0), (0, 0), (0, 0), (0, 0)]
            sage: W = WeylGroup(['A',2,1], prefix="s")
            sage: W.reflections()
            Lazy family (real root to reflection(i))_{i in
            Positive real roots of type ['A', 2, 1]}
        TESTS::
            sage: CM = CartanMatrix([[2,-6],[-1,2]])
            sage: W = WeylGroup(CM, prefix='s')
            sage: W.reflections()
            Traceback (most recent call last):
            ...
            NotImplementedError: only implemented for finite and affine Cartan types
        """
        prr = self.domain().positive_real_roots()
        def to_elt(alp):
            # Reflection through the hyperplane orthogonal to ``alp``,
            # written as a matrix in the domain's basis.
            ref = self.domain().reflection(alp)
            m = Matrix([ref(x).to_vector() for x in self.domain().basis()])
            return self(m.transpose())
        # Lazy family so infinite (affine) root systems are supported.
        return Family(prr, to_elt, name="real root to reflection")
def _repr_(self):
"""
EXAMPLES::
sage: WeylGroup(['A', 1])
Weyl Group of type ['A', 1] (as a | |
import random
import pygame
import math
import os
import pygameMenu
from pygameMenu.locals import *
# *****************************************************************************
# Info text for the About Menu
# *****************************************************************************
ABOUT = ['Evolving Enemies:',
         'A Game where the Enemies evolve using Genetic Algorithms',
         PYGAMEMENU_TEXT_NEWLINE,
         'Author: <NAME>',
         PYGAMEMENU_TEXT_NEWLINE,
         'Email: <EMAIL>']
# *****************************************************************************
# Info text for the Help Menu
# *****************************************************************************
HELP = ['Controls:',
        'Use Arrow keys to move player ship left/right.',
        PYGAMEMENU_TEXT_NEWLINE,
        'Press SpaceBar to fire Missile.']
# *****************************************************************************
# Defining the constants and variables
# *****************************************************************************
SCREEN_WIDTH = 1024
SCREEN_HEIGHT = 768
WINDOW_START_POSITION_X = 200
WINDOW_START_POSITION_Y = 0
PLAYER_FIRE_TORPEDO = False
PLAYER_TORPEDO_FIRING_RATE = 5
GENERATE_GAME_OBJECTS = True
# Single-element lists: pygameMenu selector widgets expect list-wrapped values.
MUTATION_RATE_MATRIX = ['0.2']
MUTATION_RATE = 0.2
COLOR_BACKGROUND = (100, 100, 100)
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (250, 250, 250)
COLOR_BLUE = (0, 0, 250)
COLOR_RED = (250,0,0)
MENU_BACKGROUND_COLOR = (0, 0, 0)
GAME_DIFFICULTY = ['EASY']
RUN_GAME = True
EPISODE_COUNT = 0
ENEMY_MISSILE_TIMER = 5000  # presumably milliseconds between enemy shots -- TODO confirm
# *****************************************************************************
# The start window position for the game window
# *****************************************************************************
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (WINDOW_START_POSITION_X,WINDOW_START_POSITION_Y)
# *****************************************************************************
# Initialize Pygame and create screen and objects
# *****************************************************************************
pygame.init()
pygame.display.set_caption("Evolving Enemies")
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
STAR_FIELD_TIMER = pygame.time.get_ticks()
episode_info_text = pygame.font.Font(pygameMenu.fonts.FONT_FRANCHISE, 30)
# *****************************************************************************
# The initial population for the Genetic Algorithm
# *****************************************************************************
# Chromosome layout: genes at indices 0-5, fitness value at index 6
# (Genetic_Algorithm.current_fitness reads elem[6]).
A0 = [0, 0, 0, 0, 0, 0, 0]
A1 = [0, 0, 0, 0, 0, 0, 0]
A2 = [0, 0, 0, 0, 0, 0, 0]
A3 = [0, 0, 0, 0, 0, 0, 0]
A4 = [0, 0, 0, 0, 0, 0, 0]
A5 = [0, 0, 0, 0, 0, 0, 0]
initial_population = [A0,A1,A2,A3,A4,A5]
# Number of genes only (the trailing fitness slot is excluded).
chromosome_length = len(A0)-1
# *****************************************************************************
# Creates the pygame sprite groups and enemy ship lists
# *****************************************************************************
player_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
player_projectile_list = pygame.sprite.Group()
enemy_projectile_list = pygame.sprite.Group()
enemy_list = pygame.sprite.Group()
enemy_shield_list = pygame.sprite.Group()
star_field_list = pygame.sprite.Group()
current_enemy_population = []
enemy_ship_list = []
class Genetic_Algorithm():
    """Simple genetic algorithm over fixed-length binary chromosomes.

    A chromosome is a list of six genes (indices 0-5) followed by its
    fitness value stored at index 6.
    """
    def __init__(self, initial_population, MUTATION_RATE, chromosome_length):
        """Store the GA parameters and echo the starting population."""
        self.initial_population = initial_population
        self.MUTATION_RATE = MUTATION_RATE
        self.chromosome_length = chromosome_length
        print('Initial population: ' + str(self.initial_population))
    def current_fitness(self, elem):
        """Sort key: the fitness stored in the chromosome's last slot."""
        return elem[6]
    def sort_by_fitness(self, current_population):
        """Sort the population in place, highest fitness first, and return it."""
        current_population.sort(key=self.current_fitness, reverse=True)
        return current_population
    def selection(self, current_population):
        """Pick two parents from a fitness-sorted population of six.

        Chromosomes with fitness below 20 are preferred so that weaker
        individuals keep breeding.  If at most two qualify, the two fittest
        overall are used instead and ``early_stop`` is flagged True.
        """
        candidates = [current_population[idx] for idx in range(6)
                      if current_population[idx][6] < 20]
        if len(candidates) > 2:
            return candidates[0], candidates[1], False
        return current_population[0], current_population[1], True
    def crossover(self, p1, p2):
        """One-point crossover at a random site in [1, 5].

        Each child inherits its fitness slot unchanged from one parent.
        """
        site = random.randint(1, 5)
        child_a = p1[:site] + p2[site:self.chromosome_length] + [p1[6]]
        child_b = p2[:site] + p1[site:self.chromosome_length] + [p2[6]]
        return child_a, child_b
    def mutation(self, chromosome, mutatation_rate):
        """Flip randomly chosen distinct genes in place.

        The number of flips is floor(rate * chromosome_length); the mutated
        chromosome is also returned for convenience.
        """
        flipped = []
        remaining = math.floor(mutatation_rate*self.chromosome_length)
        while remaining > 0:
            gene = random.randint(0, 5)
            if gene not in flipped:
                # Toggle the binary gene value.
                chromosome[gene] = 1 if chromosome[gene] == 0 else 0
                remaining = remaining - 1
                flipped.append(gene)
        return chromosome
    def generate_new_population(self):
        """Run one generation: sort, select, crossover, mutate, and replace
        the two weakest chromosomes with the mutated children."""
        population = self.sort_by_fitness(self.initial_population)
        parent_1, parent_2, _early_stop = self.selection(population)
        child_1, child_2 = self.crossover(parent_1, parent_2)
        population[4] = self.mutation(child_1, self.MUTATION_RATE)
        population[5] = self.mutation(child_2, self.MUTATION_RATE)
        return self.sort_by_fitness(population)
# *****************************************************************************
# Creates Player class
# *****************************************************************************
class Player(pygame.sprite.Sprite):
    """Player ship sprite.

    Animation frames: indices 0-4 idle, 5-9 moving left, 10-14 moving right.
    """
    def __init__(self, x, y):
        """Load all animation frames and place the ship at (x, y)."""
        super().__init__()
        self.images = []
        for frame in range(1, 6):
            self.images.append(pygame.image.load(
                'Sprites/Player_Ship/Player_Ship_Idle_%d.png' % frame))
        for side in ('Left', 'Right'):
            for frame in range(1, 6):
                self.images.append(pygame.image.load(
                    'Sprites/Player_Ship/Player_Ship_Moving_%s_%d.png' % (side, frame)))
        self.index = 0
        self.image = self.images[self.index]
        bounds = self.image.get_rect()
        self.rect = pygame.Rect(x, y, bounds[2], bounds[3])
    def update(self, surface):
        """Advance the animation and move the ship per the arrow keys."""
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_LEFT]:
            # Cycle through the moving-left frames (5-9).
            self.index += 1
            if not 5 <= self.index <= 9:
                self.index = 5
            # Move left, clamped at the left screen edge.
            self.rect.x = max(self.rect.x - 2, 0)
        elif pressed[pygame.K_RIGHT]:
            # Cycle through the moving-right frames (10-14).
            self.index += 1
            if not 10 <= self.index < len(self.images):
                self.index = 10
            # Move right, clamped so the ship stays on screen.
            self.rect.x = min(self.rect.x + 2, 960)
        else:
            # Cycle through the idle frames (0-4).
            self.index += 1
            if self.index > 4:
                self.index = 0
        self.image = self.images[self.index]
# *****************************************************************************
# Creates Player Projectile class
# *****************************************************************************
class Player_Projectile(pygame.sprite.Sprite):
    """A missile fired by the player.

    Frame layout in ``self.images``:
      0-9   flying animation,
      10-18 explosion animation.
    """

    def __init__(self, player_projectile_x, player_projectile_y):
        """Load flying/explosion frames and spawn at the given position."""
        super().__init__()
        self.images = []
        # 10 flying frames followed by 9 explosion frames
        # (replaces 19 copy-pasted pygame.image.load calls).
        for frame in range(10):
            self.images.append(pygame.image.load(
                'Sprites/Player_Ship/Missile_3_Flying_{:03d}.png'.format(frame)))
        for frame in range(9):
            self.images.append(pygame.image.load(
                'Sprites/Player_Ship/Missile_3_Explosion_{:03d}.png'.format(frame)))
        self.index = 0
        self.image = self.images[self.index]
        self.rect = pygame.Rect(player_projectile_x, player_projectile_y,
                                self.image.get_rect()[2], self.image.get_rect()[3])

    # Update function is called once every frame.
    def update(self, surface):
        """Animate and move the missile upwards (towards y == 0)."""
        # Cycle through the flying frames (0-9) only.
        self.index += 1
        if self.index > 9:
            self.index = 0
        self.rect.y = self.rect.y - 2
        self.image = self.images[self.index]

    def player_projectile_explosion(self):
        """Draw an explosion frame at the point of impact.

        Called once when this projectile hits an enemy. The rect is offset
        so the (larger) explosion sprite covers the collision point.
        """
        self.index = 15  # a mid-explosion frame
        self.image = self.images[self.index]
        self.rect.x = self.rect.x - 25
        self.rect.y = self.rect.y - 20
        # NOTE(review): relies on the module-level globals
        # `player_projectile_list` and `screen`.
        player_projectile_list.draw(screen)
        pygame.display.update()
# *****************************************************************************
# Creates Enemy Ship class
# *****************************************************************************
class Enemy_Ship(pygame.sprite.Sprite):
# Initialize the class and the sprites
def __init__(self,chromosome,enemy_ship_x,enemy_ship_y,enemy_ship_tag):
super().__init__()
self.images = []
self.move_enemy = False
self.move_left = True
self.enemy_fire_timer = pygame.time.get_ticks()
self.enemy_survive_timer = pygame.time.get_ticks()
self.chaingun_enemy = False
self.bottom_shield = None
self.top_shield = None
self.left_shield = None
self.right_shield = None
self.enemy_ship_tag = enemy_ship_tag
self.chromosome = chromosome
self.fitness_value = self.chromosome[6]
# Give enemy ability to shoot back
if self.chromosome[0] == 1:
self.chaingun_enemy = True
# Give enemy ability to move
if self.chromosome[1] == 1:
self.move_enemy = True
# Give enemy bottom shield
if self.chromosome[2] == 1:
bottom_shield = Enemy_Shields(0, enemy_ship_x+10, enemy_ship_y+65, chromosome,self.enemy_ship_tag)
all_sprites_list.add(bottom_shield)
enemy_shield_list.add(bottom_shield)
self.bottom_shield = bottom_shield
# Give enemy top shield
if self.chromosome[3] == 1:
top_shield = Enemy_Shields(1,enemy_ship_x+10,enemy_ship_y-10,chromosome,self.enemy_ship_tag)
all_sprites_list.add(top_shield)
enemy_shield_list.add(top_shield)
self.top_shield = top_shield
# Give enemy left shield
if self.chromosome[4] == 1:
left_shield = Enemy_Shields(2,enemy_ship_x-15,enemy_ship_y-10,chromosome,self.enemy_ship_tag)
all_sprites_list.add(left_shield)
enemy_shield_list.add(left_shield)
self.left_shield = left_shield
# Give enemy right shield
if self.chromosome[5] == 1:
right_shield = Enemy_Shields(3,enemy_ship_x+110,enemy_ship_y-10,chromosome,self.enemy_ship_tag)
all_sprites_list.add(right_shield)
enemy_shield_list.add(right_shield)
self.right_shield = right_shield
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Enemy_Ship.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_000.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_004.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_005.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_007.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_009.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_011.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_013.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_016.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_017.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_019.png'))
self.images.append(pygame.image.load('Sprites/Enemy_Ship/Ship6_Explosion_021.png'))
self.index = 0
self.image = self.images[self.index]
self.rect = pygame.Rect(enemy_ship_x,enemy_ship_y,self.image.get_rect()[2]-10,self.image.get_rect()[3]-25)
# Update function is called once every frame
def update(self,screen):
global ENEMY_MISSILE_TIMER
# Change index for animation of projectile moving forward
self.index += 1
# Move enemy left/right depending on the location of the sprite on screen
if self.move_enemy == True:
if self.move_left == True:
# Stop sprite from moving off screen left
if self.rect.x < 15:
self.move_left = False
self.rect.x = self.rect.x - 3
else:
# Stop sprite from moving off screen right
if self.rect.x > 890:
self.move_left = True
self.rect.x = self.rect.x + 3
# Fire projectiles at a time interval if enemy can shoot
# This time interval decreases as game difficulty increases
if pygame.time.get_ticks() - self.enemy_fire_timer >= ENEMY_MISSILE_TIMER and self.chaingun_enemy == True:
enemy_projectile = Enemy_Projectile(self.enemy_ship_tag,self.rect.x+75,self.rect.y+75)
enemy_projectile_list.add(enemy_projectile)
all_sprites_list.add(enemy_projectile)
self.enemy_fire_timer = pygame.time.get_ticks()
# Increase the fitness value of enemies survived for more than 5 seconds
# The longer the enemy survives, the higher the fitness
if pygame.time.get_ticks() - self.enemy_survive_timer >= 5000:
self.fitness_value = self.fitness_value + 2
self.chromosome[6] = self.fitness_value
self.enemy_survive_timer = pygame.time.get_ticks()
# Called when player projectile hits enemy or enemy projectile hits player
def update_fitness(self,Message):
global GENERATE_GAME_OBJECTS
# When player projectile hits enemy shield
if Message == 'Shield_Hit':
# Reduce fitness value by one and assign new fitness to chromosome
self.fitness_value = self.fitness_value - 1
if self.fitness_value < 0:
self.fitness_value = 0
self.chromosome[6] = self.fitness_value
# When player projectile hits | |
'TensorFlow v1.14 (CUDA)',
'tensorflow-v1.14-tensorrt': 'TensorFlow v1.14 (TensorRT-static)',
'tensorflow-v1.14-tensorrt-dynamic': 'TensorFlow v1.14 (TensorRT-dynamic)',
}
if library_backend == 'tensorflow-v1.14-cpu':
status = 'RDI'
elif library_backend == 'tflite-v1.15.0' or library_backend == 'tensorrt-v6.0':
status = 'unofficial'
else:
status = 'available'
template = deepcopy(platform_templates[platform])
template.update({
'division' : division,
'submitter' : 'dividiti', # 'dividiti' if platform != 'velociti' else 'dividiti, Politecnico di Milano'
'status' : status,
'framework' : frameworks[library_backend]
})
if (not library_backend.startswith('tensorrt') and not library_backend.startswith('tensorflow') and not library_backend.endswith('opencl')) or library_backend.endswith('cpu'):
template.update({
'accelerator_frequency' : '-',
'accelerator_memory_capacity' : '-',
'accelerator_memory_configuration': '-',
'accelerator_model_name' : '-',
'accelerator_on-chip_memories': '-',
'accelerators_per_node' : '0',
})
division_systems[division_system] = template
print("=" * 100)
print(division_system)
print("=" * 100)
pprint(template)
print("-" * 100)
print("")
# <a id="implementations"></a>
# ## Implementations
# ### Image classification
# In[ ]:
# Generate implementation_benchmarks dictionary: maps
# '<implementation>-<benchmark>' -> its `system_desc_id_imp.json` fields.
implementation_benchmarks = {}

# Default `system_desc_id_imp.json` (to catch uninitialized descriptions).
default_implementation_benchmark_json = {
    "input_data_types": "required",
    "retraining": "required",
    "starting_weights_filename": "required",
    "weight_data_types": "required",
    "weight_transformations": "required"
}

# For each image classification implementation.
for implementation in [ 'image-classification-tflite', 'image-classification-armnn-tflite' ]:
    # Add MobileNet.
    implementation_mobilenet = implementation+'-'+'mobilenet'
    implementation_benchmarks[implementation_mobilenet] = {
        "input_data_types": "fp32",
        "weight_data_types": "fp32",
        "retraining": "no",
        "starting_weights_filename": "https://zenodo.org/record/2269307/files/mobilenet_v1_1.0_224.tgz",
        "weight_transformations": "TFLite"
    }
    # Add MobileNet quantized.
    implementation_mobilenet_quantized = implementation+'-'+'mobilenet-quantized'
    implementation_benchmarks[implementation_mobilenet_quantized] = {
        "input_data_types": "uint8",
        "weight_data_types": "uint8",
        "retraining": "no",
        "starting_weights_filename": "https://zenodo.org/record/2269307/files/mobilenet_v1_1.0_224_quant.tgz",
        "weight_transformations": "TFLite"
    }
    # Add ResNet.
    implementation_resnet = implementation+'-'+'resnet'
    implementation_benchmarks[implementation_resnet] = {
        "input_data_types": "fp32",
        "weight_data_types": "fp32",
        "retraining": "no",
        "starting_weights_filename": "https://zenodo.org/record/2535873/files/resnet50_v1.pb",
        "weight_transformations": "TF -> TFLite"
    }
    # Add any MobileNets-v1,v2 model.
    # NOTE: defined inside the loop on purpose — it reads the loop variable
    # `implementation` from module scope when called below.
    def add_implementation_mobilenet(implementation_benchmarks, version, multiplier, resolution, quantized=False):
        """Register one MobileNet-v1/v2 variant for the current `implementation`.

        Quantized variants only exist for MobileNet-v1 with the plain
        TFLite implementation; other quantized requests are silently ignored.
        """
        base_url = 'https://zenodo.org/record/2269307/files' if version == 1 else 'https://zenodo.org/record/2266646/files'
        url = '{}/mobilenet_v{}_{}_{}{}.tgz'.format(base_url, version, multiplier, resolution, '_quant' if quantized else '')
        benchmark = 'mobilenet-v{}-{}-{}{}'.format(version, multiplier, resolution, '-quantized' if quantized else '')
        if quantized and (version != 1 or implementation != 'image-classification-tflite'):
            return
        if implementation == 'image-classification-tflite':
            weights_transformations = 'TFLite'
        elif implementation == 'image-classification-armnn-tflite':
            weights_transformations = 'TFLite -> ArmNN'
        else:
            # BUG FIX: raising a plain string is itself a TypeError on
            # Python 3, so the intended message never surfaced; raise a
            # proper exception instead.
            raise ValueError("Unknown implementation '%s'!" % implementation)
        implementation_benchmark = implementation+'-'+benchmark
        implementation_benchmarks[implementation_benchmark] = {
            "input_data_types": "uint8" if quantized else "fp32",
            "weight_data_types": "uint8" if quantized else "fp32",
            "retraining": "no",
            "starting_weights_filename": url,
            "weight_transformations": weights_transformations
        }
        return
    # MobileNet-v1: all multiplier/resolution combinations, plus quantized.
    version = 1
    for multiplier in [ 1.0, 0.75, 0.5, 0.25 ]:
        for resolution in [ 224, 192, 160, 128 ]:
            add_implementation_mobilenet(implementation_benchmarks, version, multiplier, resolution, quantized=False)
            add_implementation_mobilenet(implementation_benchmarks, version, multiplier, resolution, quantized=True)
    # MobileNet-v2: all multiplier/resolution combinations (fp32 only),
    # plus the two extra width multipliers released for 224x224.
    version = 2
    for multiplier in [ 1.0, 0.75, 0.5, 0.35 ]:
        for resolution in [ 224, 192, 160, 128, 96 ]:
            add_implementation_mobilenet(implementation_benchmarks, version, multiplier, resolution)
    add_implementation_mobilenet(implementation_benchmarks, version=2, multiplier=1.3, resolution=224)
    add_implementation_mobilenet(implementation_benchmarks, version=2, multiplier=1.4, resolution=224)
# Register the two benchmarks of the TensorRT-based implementation.
for implementation in [ 'image-classification-tensorrt-loadgen-py']:
    implementation_resnet = implementation + '-resnet'
    implementation_mobilenet = implementation + '-mobilenet'
    tensorrt_entries = {
        # ResNet50 (fp32, per the recorded data types).
        implementation_resnet: {
            "input_data_types": "fp32",
            "weight_data_types": "fp32",
            "retraining": "no",
            "starting_weights_filename": "https://zenodo.org/record/2535873/files/resnet50_v1.pb",
            "weight_transformations": "ONNX -> TensorRT"
        },
        # MobileNet (int8, per the recorded data types).
        implementation_mobilenet: {
            "input_data_types": "int8",
            "weight_data_types": "int8",
            "retraining": "no",
            "starting_weights_filename": "https://zenodo.org/record/2269307/files/mobilenet_v1_1.0_224.tgz",
            "weight_transformations": "TF -> TensorRT"
        },
    }
    implementation_benchmarks.update(tensorrt_entries)
# ### Object detection
# In[ ]:
# Object detection models: benchmark id -> display name, download URL and
# input resolution (plus provenance where it is not the TF model zoo).
# BUG FIX: the 'ssd-mobilenet-v1-quantized-mlperf' and
# 'ssd-mobilenet-v1-non-quantized-mlperf' entries had their values swapped
# (the quantized key pointed at the fp32 Google model, and vice versa).
object_detection_benchmarks = {
    'rcnn-nas-lowproposals' : {
        "name" : "Faster-RCNN-NAS lowproposals",
        "url" : "http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_lowproposals_coco_2018_01_28.tar.gz",
        "width" : 1200,
        "height" : 1200,
    },
    'rcnn-resnet50-lowproposals' : {
        "name" : "Faster-RCNN-ResNet50 lowproposals",
        "url" : "http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_lowproposals_coco_2018_01_28.tar.gz",
        "width" : 1024,
        "height" : 600,
    },
    'rcnn-resnet101-lowproposals' : {
        "name" : "Faster-RCNN-ResNet101 lowproposals",
        "url" : "http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_lowproposals_coco_2018_01_28.tar.gz",
        "width" : 1024,
        "height" : 600,
    },
    'rcnn-inception-resnet-v2-lowproposals' : {
        "name" : "Faster-RCNN-Inception-ResNet-v2 lowproposals",
        "url" : "http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco_2018_01_28.tar.gz",
        "width" : 1024,
        "height" : 600,
    },
    'rcnn-inception-v2' : {
        "name" : "Faster-RCNN Inception-v2",
        "url" : "http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_v2_coco_2018_01_28.tar.gz",
        "width" : 1024,
        "height" : 600,
    },
    'ssd-inception-v2' : {
        "name" : "SSD-Inception-v2",
        "url" : "http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz",
        "width" : 300,
        "height" : 300,
    },
    'ssd-mobilenet-v1-quantized-mlperf' : {
        "name" : "MLPerf SSD-MobileNet quantized",
        "url" : "https://zenodo.org/record/3252084/files/mobilenet_v1_ssd_8bit_finetuned.tar.gz",
        "width" : 300,
        "height" : 300,
        "provenance" : "Habana"
    },
    'ssd-mobilenet-v1-non-quantized-mlperf' : {
        "name" : "MLPerf SSD-MobileNet",
        "url" : "https://zenodo.org/record/3361502/files/ssd_mobilenet_v1_coco_2018_01_28.tar.gz",
        "width" : 300,
        "height" : 300,
        "provenance" : "Google",
    },
    'ssd-mobilenet-v1-fpn' : {
        "name" : "SSD-MobileNet-v1 FPN SBP",
        "url" : "http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz",
        "width" : 640,
        "height" : 640,
    },
    'ssd-resnet50-fpn' : {
        "name" : "SSD-ResNet50-v1 FPN SBP",
        "url" : "http://download.tensorflow.org/models/object_detection/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz",
        "width" : 640,
        "height" : 640,
    },
    'ssdlite-mobilenet-v2' : {
        "name" : "SSDLite-MobileNet-v2",
        "url" : "http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz",
        "width" : 300,
        "height" : 300,
    },
    'yolo-v3' : {
        "name" : "YOLO-v3",
        "url" : "https://zenodo.org/record/3386327/files/yolo_v3_coco.tar.gz",
        "width" : 416,
        "height" : 416,
        "provenance" : "https://github.com/YunYang1994/tensorflow-yolov3/"
    }
}
# Register every object detection benchmark for each implementation.
for implementation in [ 'mlperf-inference-vision' ]:
    for benchmark, benchmark_info in object_detection_benchmarks.items():
        implementation_benchmark = '{}-{}'.format(implementation, benchmark)
        implementation_benchmarks[implementation_benchmark] = {
            "input_data_types": "fp32",
            "weight_data_types": "fp32",
            "retraining": "no",
            "starting_weights_filename": benchmark_info['url'],
            # "name" is deliberately omitted: the submission checker
            # complains about an "unknown field name".
            "weight_transformations": "None (TensorFlow)"
        }
from pprint import pprint
# pprint(implementation_benchmarks)
# In[ ]:
# Verbatim markdown README fragments, one per implementation.
implementation_readmes = {}
implementation_readmes['image-classification-tflite'] = """# MLPerf Inference - Image Classification - TFLite
This C++ implementation uses TFLite to run TFLite models for Image Classification on CPUs.
## Links
- [Jupyter notebook](https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/1xlv5oacgobrfd4/mlperf-inference-v0.5-dividiti.ipynb)
- [Source code](https://github.com/ctuning/ck-mlperf/tree/master/program/image-classification-tflite-loadgen).
- [Instructions](https://github.com/mlperf/inference/blob/master/v0.5/classification_and_detection/optional_harness_ck/classification/tflite/README.md).
"""
implementation_readmes['image-classification-armnn-tflite'] = """# MLPerf Inference - Image Classification - ArmNN-TFLite
This C++ implementation uses ArmNN with the TFLite frontend to run TFLite models for Image Classification on Arm Cortex CPUs and Arm Mali GPUs.
## Links
- [Jupyter notebook](https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/1xlv5oacgobrfd4/mlperf-inference-v0.5-dividiti.ipynb)
- [Source code](https://github.com/ctuning/ck-mlperf/tree/master/program/image-classification-armnn-tflite-loadgen).
- [Instructions](https://github.com/ARM-software/armnn-mlperf/blob/master/README.md).
"""
# NOTE(review): the text below says "Arm Cortex CPUs and Arm Mali GPUs" for
# a TensorRT implementation; TensorRT targets NVIDIA hardware — confirm the
# wording before reusing it (left unchanged here).
implementation_readmes['image-classification-tensorrt-loadgen-py'] = """# MLPerf Inference - Image Classification - TensorRT
This Python implementation uses TensorRT to run models Image Classification on Arm Cortex CPUs and Arm Mali GPUs.
### Links
- [Source code](https://github.com/ctuning/ck-mlperf/tree/master/program/image-classification-tensorrt-loadgen-py).
"""
implementation_readmes['mlperf-inference-vision'] = """# MLPerf Inference - Object Detection - TensorFlow
This Python implementation is the official MLPerf Inference vision application, modified to support other
object detection models and run with TensorRT.
## Links
- [CK wrapper](https://github.com/ctuning/ck-object-detection/tree/master/program/mlperf-inference-vision).
- [vision_with_ck branch in dividiti's fork of mlperf/inference](https://github.com/dividiti/inference/tree/vision_with_ck).
- [Docker image with instructions](https://github.com/ctuning/ck-mlperf/tree/master/docker/mlperf-inference-vision-with-ck.tensorrt.ubuntu-18.04).
- [Jupyter notebook](https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/1xlv5oacgobrfd4/mlperf-inference-v0.5-dividiti.ipynb)
"""
# In[ ]:
# Resolve the on-disk CK program directory for each implementation.
# NOTE(review): `ck` is the Collective Knowledge module from the notebook
# environment; `exit` is the site builtin (this is notebook-derived code).
implementation_paths = {}
for implementation in [ 'image-classification-tflite', 'image-classification-armnn-tflite', 'image-classification-tensorrt-loadgen-py', 'mlperf-inference-vision' ]:
    implementation_uoa = implementation
    if implementation.startswith('image-classification'):
        if implementation.endswith('tflite'):
            # TFLite-based programs are registered with a '-loadgen' suffix.
            implementation_uoa += '-loadgen'
        repo_uoa = 'ck-mlperf'
    else: # TODO: move to ck-mlperf, then no need for special case.
        repo_uoa = 'ck-object-detection'
    r = ck.access({'action':'find', 'repo_uoa':repo_uoa, 'module_uoa':'program', 'data_uoa':implementation_uoa})
    if r['return']>0:
        # CK reports failure via a non-zero 'return' field, not exceptions.
        print('Error: %s' % r['error'])
        exit(1)
    implementation_paths[implementation] = r['path']
# In[ ]:
# Per-division, per-task README fragments for the measurements/ tree,
# keyed as '<division>-<task>' (e.g. 'closed-image-classification').
measurements_readmes = {}
# Image classification: same text for both divisions, parameterized by the
# division name (and its lowercase form inside the script link).
task = 'image-classification'
for division_upper in [ 'Closed', 'Open' ]:
    division_lower = division_upper.lower()
    measurements_readmes[division_lower+'-'+task] = '''# MLPerf Inference - {} Division - Image Classification
We performed our measurements using automated, customizable, portable and reproducible
[Collective Knowledge](http://cknowledge.org) workflows. Our workflows automatically
install dependencies (models, datasets, etc.), preprocess input data in the correct way,
and so on.
## CK repositories
As CK is always evolving, it is hard to pin particular revisions of all repositories.
The most relevant repositories and their latest revisions on the submission date (11/Oct/2019):
- [ck-mlperf](https://github.com/ctuning/ck-mlperf) @ [ee77cfd](https://github.com/ctuning/ck-mlperf/commit/ee77cfd3ddfa30739a8c2f483fe9ba83a233a000) (contains programs integrated with LoadGen, model packages and scripts).
- [ck-env](https://github.com/ctuning/ck-env) @ [f9ac337](https://github.com/ctuning/ck-env/commit/f9ac3372cdc82fa46b2839e45fc67848ab4bac03) (contains dataset descriptions, preprocessing methods, etc.)
- [ck-tensorflow](https://github.com/ctuning/ck-tensorflow) @ [eff8bec](https://github.com/ctuning/ck-tensorflow/commit/eff8bec192021162e4a336dbd3e795afa30b7d26) (contains TFLite packages).
- [armnn-mlperf](https://github.com/arm-software/armnn-mlperf) @ [42f44a2](https://github.com/ARM-software/armnn-mlperf/commit/42f44a266b6b4e04901255f46f6d34d12589208f) (contains ArmNN/ArmCL packages).
## Links
- [Bash script](https://github.com/ctuning/ck-mlperf/tree/master/script/mlperf-inference-v0.5.{}.image-classification) used to invoke benchmarking on Linux systems or Android devices.
'''.format(division_upper, division_lower)
# Object detection: same idea, different repositories/links and date.
task = 'object-detection'
for division_upper in [ 'Closed', 'Open' ]:
    division_lower = division_upper.lower()
    measurements_readmes[division_lower+'-'+task] = '''# MLPerf Inference - {} Division - Object Detection
We performed our measurements using automated, customizable, portable and reproducible
[Collective Knowledge](http://cknowledge.org) workflows. Our workflows automatically
install dependencies (models, datasets, etc.), preprocess input data in the correct way,
and so on.
## CK repositories
As CK is always evolving, it is hard to pin particular revisions of all repositories.
The most relevant repositories and their latest revisions on the submission date (18/Oct/2019):
- [ck-mlperf](https://github.com/ctuning/ck-mlperf) @ [ef1fced](https://github.com/ctuning/ck-mlperf/commit/ef1fcedd495fd03b5ad6d62d62c8ba271854f2ad) (contains the CK program wrapper, MLPerf SSD-MobileNet model packages and scripts).
- [ck-object-detection](https://github.com/ctuning/ck-object-detection) @ [780d328](https://github.com/ctuning/ck-object-detection/commit/780d3288ec19656cb60c5ad39b2486bbf0fbf97a) (contains most model packages)
- [ck-env](https://github.com/ctuning/ck-env) @ [5af9fbd](https://github.com/ctuning/ck-env/commit/5af9fbd93ad6c6465b631716645ad9442a333442) (contains dataset descriptions, preprocessing methods, etc.)
## Links
- [Docker image with instructions](https://github.com/ctuning/ck-mlperf/tree/master/docker/mlperf-inference-vision-with-ck.tensorrt.ubuntu-18.04).
- [Bash script](https://github.com/ctuning/ck-mlperf/tree/master/script/mlperf-inference-v0.5.{}.object-detection) used to invoke benchmarking via the Docker image.
'''.format(division_upper, division_lower)
# In[ ]:
# Snapshot of https://github.com/dividiti/inference/blob/61220457dec221ed1984c62bd9d382698bd71bc6/v0.5/mlperf.conf
# Verbatim LoadGen configuration; the variable name suffix encodes the
# first 7 characters of the upstream commit hash. Kept as a raw string so
# it can be written out unchanged into the submission tree.
mlperf_conf_6122045 = '''
# The format of this config file is 'key = value'.
# The key has the format 'model.scenario.key'. Value is mostly int64_t.
# Model maybe '*' as wildcard. In that case the value applies to all models.
# All times are in milli seconds
*.SingleStream.target_latency = 10
*.SingleStream.target_latency_percentile = 90
*.SingleStream.min_duration = 60000
*.SingleStream.min_query_count = 1024
*.MultiStream.target_qps = 20
*.MultiStream.target_latency_percentile = 99
*.MultiStream.samples_per_query = 4
*.MultiStream.max_async_queries = 1
*.MultiStream.target_latency = 50
*.MultiStream.min_duration = 60000
*.MultiStream.min_query_count = 270336
ssd-resnet34.MultiStream.target_qps = 15
ssd-resnet34.MultiStream.target_latency = 66
gnmt.MultiStream.min_query_count = 90112
gnmt.MultiStream.target_latency = 100
gnmt.MultiStream.target_qps = 10
gnmt.MultiStream.target_latency_percentile = 97
*.Server.target_qps = 1.0
*.Server.target_latency = 10
*.Server.target_latency_percentile = 99
*.Server.target_duration = 0
*.Server.min_duration = 60000
*.Server.min_query_count = 270336
resnet50.Server.target_latency = 15
ssd-resnet34.Server.target_latency = 100
gnmt.Server.min_query_count = 90112
gnmt.Server.target_latency = 250
gnmt.Server.target_latency_percentile = 97
*.Offline.target_qps = 1.0
*.Offline.target_latency_percentile = 90
*.Offline.min_duration = 60000
*.Offline.min_query_count = 1
'''
# <a id="get"></a>
# ## Get the experimental data
# Download experimental data and add CK repositories as follows.
# <a id="get_image_classification_closed"></a>
# ### Image Classification - | |
#!/bin/env python3
import argparse
import esprima
import json
import logging
import os
import re
import sys
import traceback
logger = logging.getLogger(__name__)
err_context = 3
def get_req_body_elems(node, elems):
    """Walk an esprima AST node collecting `req.body.<field>` accesses.

    Field names found via dotted access or via object destructuring of
    `req.body` are appended to `elems` (deduplicated, insertion order).
    Returns the dotted path for member/identifier nodes, '' otherwise.
    Unhandled node types are printed so the walker can be extended.
    """
    kind = node.type
    if kind in ('FunctionExpression', 'ArrowFunctionExpression'):
        get_req_body_elems(node.body, elems)
    elif kind == 'BlockStatement':
        for child in node.body:
            get_req_body_elems(child, elems)
    elif kind == 'TryStatement':
        get_req_body_elems(node.block, elems)
    elif kind == 'ExpressionStatement':
        get_req_body_elems(node.expression, elems)
    elif kind == 'MemberExpression':
        # Dotted access: record `<field>` when the object is `req.body`.
        prefix = get_req_body_elems(node.object, elems)
        field = node.property.name
        if prefix == 'req.body' and field not in elems:
            elems.append(field)
        return '{}.{}'.format(prefix, field)
    elif kind == 'VariableDeclaration':
        for child in node.declarations:
            get_req_body_elems(child, elems)
    elif kind == 'VariableDeclarator':
        if node.id.type == 'ObjectPattern':
            # Destructuring can't be recursed into directly, e.g.:
            #   const {isAdmin, isNoComments, isCommentOnly} = req.body;
            source = get_req_body_elems(node.init, elems)
            if source == 'req.body':
                for prop in node.id.properties:
                    field = prop.key.name
                    if field not in elems:
                        elems.append(field)
        else:
            get_req_body_elems(node.init, elems)
    elif kind == 'Property':
        get_req_body_elems(node.value, elems)
    elif kind == 'ObjectExpression':
        for child in node.properties:
            get_req_body_elems(child, elems)
    elif kind == 'CallExpression':
        for child in node.arguments:
            get_req_body_elems(child, elems)
    elif kind == 'ArrayExpression':
        for child in node.elements:
            get_req_body_elems(child, elems)
    elif kind == 'IfStatement':
        get_req_body_elems(node.test, elems)
        if node.consequent is not None:
            get_req_body_elems(node.consequent, elems)
        if node.alternate is not None:
            get_req_body_elems(node.alternate, elems)
    elif kind in ('LogicalExpression', 'BinaryExpression', 'AssignmentExpression'):
        get_req_body_elems(node.left, elems)
        get_req_body_elems(node.right, elems)
    elif kind in ('ReturnStatement', 'UnaryExpression'):
        get_req_body_elems(node.argument, elems)
    elif kind == 'Identifier':
        return node.name
    elif kind in ('Literal', 'FunctionDeclaration'):
        pass
    else:
        # Unhandled node type: dump it for debugging/extension.
        print(node)
    return ''
def cleanup_jsdocs(jsdoc):
    """Normalize a JSDoc comment node into a list of text lines.

    Strips leading whitespace and leading '*' decoration from every line,
    then drops blank lines at the start and the end (inner blank lines
    are preserved).
    """
    lines = [line.lstrip().lstrip('*') for line in jsdoc.value.split('\n')]
    # Trim blank lines from the front...
    while lines and not lines[0].strip():
        lines.pop(0)
    # ...and from the back.
    while lines and not lines[-1].strip():
        lines.pop()
    return lines
class JS2jsonDecoder(json.JSONDecoder):
    """JSON decoder that additionally turns numeric-looking strings into ints.

    After standard JSON decoding, every string value (at any nesting depth
    inside dicts and lists) that parses as an integer is replaced by that
    int; all other values pass through unchanged.
    """

    def decode(self, s):
        """Decode `s` as JSON, then int-ify numeric strings recursively."""
        result = super().decode(s)
        return self._decode(result)

    def _decode(self, o):
        """Recursive helper for decode(); converts one decoded value."""
        if isinstance(o, str):
            # BUG FIX: the old check `isinstance(o, str) or isinstance(o,
            # unicode)` raised NameError on Python 3 for every non-string
            # value (there is no `unicode` builtin); plain `str` suffices.
            try:
                return int(o)
            except ValueError:
                return o
        elif isinstance(o, dict):
            return {k: self._decode(v) for k, v in o.items()}
        elif isinstance(o, list):
            return [self._decode(v) for v in o]
        else:
            return o
def load_return_type_jsdoc_json(data):
    """Coerce a loosely-written JSDoc @return_type payload into JSON.

    Applies a pipeline of regex rewrites that quote bare keys and values
    (and bare array/single items), then parses the result with the
    standard json module. May raise json.decoder.JSONDecodeError.
    """
    rewrite_pipeline = (
        (r'\n', r' '),                                     # newlines -> spaces
        (r'([\{\s,])(\w+)(:)', r'\1"\2"\3'),               # quote bare keys
        (r'(:)\s*([^:\},\]]+)\s*([\},\]])', r'\1"\2"\3'),  # quote bare values
        (r'(\[)\s*([^{].+)\s*(\])', r'\1"\2"\3'),          # quote array items
        (r'^\s*([^\[{].+)\s*', r'"\1"'),                   # quote a single item
    )
    for pattern, replacement in rewrite_pipeline:
        data = re.sub(pattern, replacement, data)
    return json.loads(data)
class EntryPoint(object):
def __init__(self, schema, statements):
self.schema = schema
self.method, self._path, self.body = statements
self._jsdoc = None
self._doc = {}
self._raw_doc = None
self.path = self.compute_path()
self.method_name = self.method.value.lower()
self.body_params = []
if self.method_name in ('post', 'put'):
get_req_body_elems(self.body, self.body_params)
# replace the :parameter in path by {parameter}
self.url = re.sub(r':([^/]*)Id', r'{\1}', self.path)
self.url = re.sub(r':([^/]*)', r'{\1}', self.url)
# reduce the api name
# get_boards_board_cards() should be get_board_cards()
tokens = self.url.split('/')
reduced_function_name = []
for i, token in enumerate(tokens):
if token in ('api'):
continue
if (i < len(tokens) - 1 and # not the last item
tokens[i + 1].startswith('{')): # and the next token is a parameter
continue
reduced_function_name.append(token.strip('{}'))
self.reduced_function_name = '_'.join(reduced_function_name)
# mark the schema as used
schema.used = True
def compute_path(self):
return self._path.value.rstrip('/')
def log(self, message, level):
if self._raw_doc is None:
logger.log(level, 'in {},'.format(self.schema.name))
logger.log(level, message)
return
logger.log(level, 'in {}, lines {}-{}'.format(self.schema.name,
self._raw_doc.loc.start.line,
self._raw_doc.loc.end.line))
logger.log(level, self._raw_doc.value)
logger.log(level, message)
    def error(self, message):
        # Convenience wrapper: log at ERROR level via self.log().
        return self.log(message, logging.ERROR)
    def warn(self, message):
        # Convenience wrapper: log at WARNING level via self.log().
        return self.log(message, logging.WARNING)
    def info(self, message):
        # Convenience wrapper: log at INFO level via self.log().
        return self.log(message, logging.INFO)
    @property
    def doc(self):
        # Parsed documentation fields; populated by the `doc` setter below.
        return self._doc
@doc.setter
def doc(self, doc):
'''Parse the JSDoc attached to an entry point.
`jsdoc` will not get these right as they are not attached to a method.
So instead, we do our custom parsing here (yes, subject to errors).
The expected format is the following (empty lines between entries
are ignored):
/**
* @operation name_of_entry_point
* @tag: a_tag_to_add
* @tag: an_other_tag_to_add
* @summary A nice summary, better in one line.
*
* @description This is a quite long description.
* We can use *mardown* as the final rendering is done
* by slate.
*
* indentation doesn't matter.
*
* @param param_0 description of param 0
* @param {string} param_1 we can also put the type of the parameter
* before its name, like in JSDoc
* @param {boolean} [param_2] we can also tell if the parameter is
* optional by adding square brackets around its name
*
* @return Documents a return value
*/
Notes:
- name_of_entry_point will be referenced in the ToC of the generated
document. This is also the operationId used in the resulting openapi
file. It needs to be uniq in the namesapce (the current schema.js
file)
- tags are appended to the current Schema attached to the file
'''
self._raw_doc = doc
self._jsdoc = cleanup_jsdocs(doc)
def store_tag(tag, data):
# check that there is something to store first
if not data.strip():
return
# remove terminating whitespaces and empty lines
data = data.rstrip()
# parameters are handled specially
if tag == 'param':
if 'params' not in self._doc:
self._doc['params'] = {}
params = self._doc['params']
param_type = None
try:
name, desc = data.split(maxsplit=1)
except ValueError:
desc = ''
if name.startswith('{'):
param_type = name.strip('{}')
if param_type == 'Object':
# hope for the best
param_type = 'object'
elif param_type not in ['string', 'number', 'boolean', 'integer', 'array', 'file']:
self.warn('unknown type {}\n allowed values: string, number, boolean, integer, array, file'.format(param_type))
try:
name, desc = desc.split(maxsplit=1)
except ValueError:
desc = ''
optional = name.startswith('[') and name.endswith(']')
if optional:
name = name[1:-1]
# we should not have 2 identical parameter names
if tag in params:
self.warn('overwriting parameter {}'.format(name))
params[name] = (param_type, optional, desc)
if name.endswith('Id'):
# we strip out the 'Id' from the form parameters, we need
# to keep the actual description around
name = name[:-2]
if name not in params:
params[name] = (param_type, optional, desc)
return
# 'tag' can be set several times
if tag == 'tag':
if tag not in self._doc:
self._doc[tag] = []
self._doc[tag].append(data)
return
# 'return' tag is json
if tag == 'return_type':
try:
data = load_return_type_jsdoc_json(data)
except json.decoder.JSONDecodeError:
pass
# we should not have 2 identical tags but @param or @tag
if tag in self._doc:
self.warn('overwriting tag {}'.format(tag))
self._doc[tag] = data
# reset the current doc fields
self._doc = {}
# first item is supposed to be the description
current_tag = 'description'
current_data = ''
for line in self._jsdoc:
if line.lstrip().startswith('@'):
tag, data = line.lstrip().split(maxsplit=1)
if tag in ['@operation', '@summary', '@description', '@param', '@return_type', '@tag']:
# store the current data
store_tag(current_tag, current_data)
current_tag = tag.lstrip('@')
current_data = ''
line = data
else:
self.info('Unknown tag {}, ignoring'.format(tag))
current_data += line + '\n'
store_tag(current_tag, current_data)
@property
def summary(self):
if 'summary' in self._doc:
# new lines are not allowed
return self._doc['summary'].replace('\n', ' ')
return None
def doc_param(self, name):
if 'params' in self._doc and name in self._doc['params']:
return self._doc['params'][name]
return None, None, None
def print_openapi_param(self, name, indent):
ptype, poptional, pdesc = self.doc_param(name)
if pdesc is not None:
print('{}description: |'.format(' ' * indent))
print('{}{}'.format(' ' * (indent + 2), pdesc))
else:
print('{}description: the {} value'.format(' ' * indent, name))
if ptype is not None:
print('{}type: {}'.format(' ' * indent, ptype))
else:
print('{}type: string'.format(' ' * indent))
if poptional:
print('{}required: false'.format(' ' * indent))
else:
print('{}required: true'.format(' ' * indent))
@property
def operationId(self):
if 'operation' in self._doc:
return self._doc['operation']
return '{}_{}'.format(self.method_name, self.reduced_function_name)
@property
def description(self):
if 'description' in self._doc:
return self._doc['description']
return None
@property
def returns(self):
if 'return_type' in self._doc:
return self._doc['return_type']
return None
@property
def tags(self):
tags = []
if self.schema.fields is not None:
tags.append(self.schema.name)
if 'tag' in self._doc:
tags.extend(self._doc['tag'])
return tags
def print_openapi_return(self, obj, indent):
if isinstance(obj, dict):
print('{}type: object'.format(' ' * indent))
print('{}properties:'.format(' ' * indent))
for k, v in obj.items():
print('{}{}:'.format(' ' * (indent + 2), k))
self.print_openapi_return(v, indent + 4)
elif isinstance(obj, list):
if len(obj) > 1:
self.error('Error while parsing @return tag, an array should have only one type')
print('{}type: array'.format(' ' * indent))
print('{}items:'.format(' ' * indent))
# ---------------------------------------------------------------------------
# repo: Technovisio/Code_Crack
#****************************************Coffee Shop Interaction Software**********************************************
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import smtplib
from email.message import EmailMessage
import random
from random import randint
import time
import datetime as dt
import sqlite3
# Open (or create) the cafe database; the 'Transac' table logs completed sales.
con = sqlite3.connect('CafeDB.db')
cur = con.cursor()
# Price tables, keyed by item name (prices in Rs).
DRINKS = {'COFFEE': 30, 'TEA': 20, 'PEPSI_REG': 35, 'COKE_REG': 40, 'COLD_COFFEE': 50, 'COFFEE_LATTE': 30, 'CHOCO_MILK': 25, 'ORANGILLO': 20, 'MANGO_JUICE': 30,
          'VANILLA_SHAKE': 40, 'CHOCO_SHAKE': 30, 'LEMONADE': 20, 'AMERICANO_1_SHOT': 30, 'EXPRESSO_2_SHOTS': 35, 'MANGO_SHAKE': 40, 'BANANA_SHAKE': 45}
SNACKS = {'PATIES': 15, 'CHOCO_PASTRY': 35, 'CREAM_ROLL': 20, 'SANDWICH_VEG': 40, 'SANDWICH_NONVEG': 60, 'FRENCH_FRIES': 40,
          'CHICKEN_BURGER': 50, 'VEG_BURGER': 45, 'CUP_NOODLES': 35, 'STUFFED_ROLL': 30, 'MUFFINS': 20, 'CHEESE_PIZZA': 79, 'REG_PIZZA': 59}
# VEG_BURGER was orderable but had no price, so ordering it crashed the order
# parser with a NameError; priced at 45 — TODO confirm the intended price.
CRAZY_PACKS = {'CHOCO_NUTS': 40, 'CHOCO_CHIPS': 25, 'CHOCOLATE_NUTS': 45, 'MILK_COOKIES': 45, 'BUTTER_COOKIES': 50, 'SNICKERS': 15, 'SALTED_NUTS': 30, 'CHIPS': 30}
# Item-name sets used for membership checks when taking orders. They are
# derived from the price tables so the two can never drift apart again.
D = set(DRINKS)
S = set(SNACKS) | {'CHIPS'}  # CHIPS is orderable as a snack too; priced via CRAZY_PACKS
P = set(CRAZY_PACKS)
# The order parser eval()s item names, so every item must exist as a
# module-level variable equal to its price (the admin panel later refreshes
# them the same way via locals().update).
globals().update(DRINKS)
globals().update(SNACKS)
globals().update(CRAZY_PACKS)
flag = 0  # becomes non-zero once a valid order has been taken
quote = 'Always be positive, think positive!'
print("\t\t\t|_|CAFE SHOP!!\n")
while True:
try:
ch=input("You are User(U)\n or Admin(A) or New(N): \n\t")
ch=ch.upper().strip()
if ch=='A':
print("\n\tWELCOME TO ADMIN PANEL!!\n\n")
adname=input('Enter your name: ') #For admin control enter adname='ADN'
adname=adname.upper().strip()
if adname=='BIPUL':
otp=list(random.sample(range(100000,999999),1))
em=input("Enter your mail id: ")
with smtplib.SMTP('smtp.gmail.com',587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login('<EMAIL>', '<PASSWORD>')
sub = 'Cafe Shop Verification!!'
body = 'Your verification Code is: G- '
msg = f'subject: {sub}\n\n{body}{otp}'
smtp.sendmail('<EMAIL>', em, msg)
ver=int(input("Enter 6 digit verification code: G- "))
if ver==otp[0] :
print("Welcome to Admin control! {}\n".format(adname))
quote=input("\n\tEnter quote for the day: ")
op=input("What would you want to do: \n(U)'Update' items or \n(V)'View' items or \n(E)'Exit Control': ")
op=op.upper().strip()
if op=='U':
upd=input('What would you want to update(\n\t1. )DRINKS(\n\t2. )SNACKS(\n\t3. )CRAZY_PACKS): ')
upd=upd.upper().strip()
if upd==1:
inp=input("Enter new item to be update: ")
inp=inp.upper().strip()
val=int(input("Enter new value of item: "))
DRINKS[inp]=val
locals().update(DRINKS)
D.add(inp)
print(DRINKS)
continue
elif upd==2:
inp=input("Enter new item to be update: ")
inp=inp.upper().strip()
val=int(input("Enter new value of item: "))
SNACKS[inp]=val
locals().update(SNACKS)
S.add(inp)
print(SNACKS)
continue
elif upd==3:
inp=input("Enter new item to be update: ")
inp=inp.upper().strip()
val=int(input("Enter new value of item: "))
SNACKS[inp]=val
locals().update(CRAZY_PACKS)
P.add(inp)
print(CRAZY_PACKS)
continue
else:
print("you can't proceed!")
continue
elif op=='V':
print('DRINKS','SNACKS','CRAZY_PACKS',sep='\n')
cu=con.execute("SELECT * FROM Transac")
for i in cu:
print(i,sep='\n\n')
continue
elif op=='E':
print('Exting the System...........!')
time.sleep(1)
break
else:
print("\tYou didn't opt any particular operation!!")
continue
else:
print("Sorry!Wrong otp!")
continue
elif adname=='ADN':
otp=list(random.sample(range(100000,999999),1))
em=input("Enter your mail id: ")
with smtplib.SMTP('smtp.gmail.com',587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login('<EMAIL>', 'thikhai@123')
sub = 'Cafe Shop Verification!!'
body = 'Your verification Code is: G- '
msg = f'subject: {sub}\n\n{body}{otp}'
smtp.sendmail('<EMAIL>', em, msg)
ver=int(input("Enter 6 digit verification code: G- "))
if ver==otp[0] :
print("Welcome to Admin control! {}\n".format(adname))
quote=input("\n\tEnter quote for the day: ")
op=input("What would you want to do: \n(U)'Update' items or \n(V)'View' items or \n(E)'Exit Control': ")
op=op.upper().strip()
if op=='U':
upd=input('What would you want to update(\n\t1. )DRINKS(\n\t2. )SNACKS(\n\t3. )CRAZY_PACKS): ')
upd=upd.upper().strip()
if upd==1:
inp=input("Enter new item to be update: ")
inp=inp.upper().strip()
val=int(input("Enter new value of item: "))
DRINKS[inp]=val
locals().update(DRINKS)
D.add(inp)
print(DRINKS)
continue
elif upd==2:
inp=input("Enter new item to be update: ")
inp=inp.upper().strip()
val=int(input("Enter new value of item: "))
SNACKS[inp]=val
locals().update(SNACKS)
S.add(inp)
print(SNACKS)
continue
elif upd==3:
inp=input("Enter new item to be update: ")
inp=inp.upper().strip()
val=int(input("Enter new value of item: "))
SNACKS[inp]=val
locals().update(CRAZY_PACKS)
P.add(inp)
print(CRAZY_PACKS)
continue
else:
print("you can't proceed!")
continue
elif op=='V':
print('DRINKS','SNACKS','CRAZY_PACKS',sep='\n')
cu=con.execute("SELECT * FROM Transac")
for i in cu:
print(i,sep='\n\n')
continue
elif op=='E':
print('Exting the System...........!')
time.sleep(1)
break
else:
print("\tYou didn't opt any particular operation!!")
continue
else:
print("Sorry!Wrong otp!")
continue
else:
print("YOU DENIED ADMIN CONTROL!")
continue
continue
elif ch=='U':
print("Welcome User!!\n\n")
print("\t\t\t!! Cafe Shop !!\t\t\t"+"\nMay I Help you!!\n")
name=input("What's your name?\t")
name=name.upper()
print("Welcome to Cafe Shop! {0}".format(name))
print("Quote of the day: ",quote)
#Cost=eval(x)
while True:
menu=input("you wanna to see menu card Yes/No: ")
menu=menu.upper().strip()
if menu=='YES':
print(list(DRINKS.items()))
print("\n")
print(list(SNACKS.items()))
print("\n")
print(list(CRAZY_PACKS.items()))
print("\n")
break
elif menu=='NO':
print('You didn\'t opt for menu card!')
break
else:
print('Wrong Input')
print("Please select a valid option!!\tTry Again!")
continue
while True:
try:
Choice=input('What would you like to have?\n(D)for DRINKS\n(S)for SNACKS\n(P)for CRAZY_PACKS')
Choice=Choice.upper().strip()
print(Choice)
if Choice=='D':
i=input('Enter your order! ')
i=i.upper()
g=i.split(" ")
h=set(g)
t=tuple(g)
print(h)
x=i.replace(" ","+",2)
if set(g).intersection(D):
flag+=1
print("item is available in DRINKS !")
print(x+"\n",eval(x))
Cost=eval(x)
GST=(5*Cost)/100
Final_Price=Cost+GST
print("Overall Charges with GST(5%) included: ",Final_Price,"Rs/-")
break
else:
flag
print("Sorry! Please order something else from Menu Card!\nItem is not present in list!")
continue
elif Choice=='S':
i=input('Enter your order! ')
i=i.upper()
g=i.split(" ")
h=set(g)
t=tuple(g)
print(h)
x=i.replace(" ","+",2)
if set(g).intersection(S):
flag+=1
print("item is available in SNACKS !")
print(x+"\n",eval(x))
Cost=eval(x)
GST=(5*Cost)/100
Final_Price=Cost+GST
print("Overall Charges with GST(5%) included: ",Final_Price,"Rs/-")
break
else:
flag
print("Sorry! Please order something else from Menu Card!\nItem is not present in list!")
continue
elif Choice=='P':
i=input('Enter your order! ')
i=i.upper()
g=i.split(" ")
h=set(g)
t=tuple(g)
print(h)
x=i.replace(" ","+",2)
if set(g).intersection(P):
flag+=1
print("item is available in CRAZY_PACKS !")
print(x+"\n",eval(x))
Cost=eval(x)
GST=(5*Cost)/100
Final_Price=Cost+GST
print("Overall Charges with GST(5%) included: ",Final_Price,"Rs/-")
break
else:
flag
print("Sorry! Please order something else from Menu Card!\nItem is not present in list!")
continue
else:
flag
print("Sorry wrong input! Please Try anything else!")
continue
except Exception as e:
print(e)
break
while True:
if flag==0:
break
else:
print("\t\t\t !!Payment Gateway!! \t\t\t")
Payment_Method=input("Please enter Payment Mode: (Cash\nCoffee Bucks\nNet Banking\nPaytm\nDebit Card)\t\t")
Payment_Method=Payment_Method.upper().strip()
if Payment_Method=='CASH':
print("\nOverall Charges with GST(5%) included: ",Final_Price,"Rs/-")
Process=input("For Payment Press (Proceed) or (Cancel)")
Process=Process.upper().strip()
if Process=='PROCEED':
counter=randint(1,6)
token=randint(1,20)
csh=int(input('Enter the cash amount in form of paper currency(single one note at once) to pay! '))
if csh!=0 and csh!=10 and csh!=20 and csh!=50 and csh!=100 and csh!=200 and csh!=500 and csh!=2000 :
print("Enter a valid currency!!")
continue
else:
try:
counter=randint(1,6)
token=randint(1,30)
if csh<Final_Price:
print("Amount is less than payable!!\n")
break
else:
change=csh-Final_Price
change=int(change)
time.sleep(1.5)
code='ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
tran=''
for c in range(11):
tran+=random.choice(code)
print('\n\tYour transaction id: ',tran)
print('\n\tPlease collect the change: ',change)
print('\n\nYour token no is: {}, & Please visit Counter No.: {}'.format(token,counter))
time.sleep(1.5)
d=dt.datetime.now()
print(d)
p=input('\nDo you want to print a reciept?[Yes/No]')
p=p.lower()
except Exception as e:
print(e)
break
if p=='yes':
p=f'Cafe_{tran}.pdf'
ca=canvas.Canvas(f'Cafe_{tran}.pdf',pagesize=letter)
ca.setLineWidth(.3)
ca.setFont('Helvetica', 11)
ca.line(25,790,595,790)
ca.line(25,790,25,330)
ca.line(595,790,595,330)
ca.drawString(435,750,"{}".format(d))
ca.line(435,747,590,747)
ca.drawString(30,750,'|_|CAFE SHOP!!')
ca.drawString(30,735,'OF BREAK-ZONE INDUSTRIES')
ca.drawString(30,705,'RECEIVED BY:')
ca.drawString(120,705,":-{}".format(name))
ca.line(120,700,580,700)
ca.drawString(200,680, "***\t\t\tCash Memo\t\t\t***")
ca.drawString(50,650, "Items Purchased: {}".format(t))
ca.line(140,645,590,645)
ca.drawString(50,620, "Price of Items Purchased: __________________________________")
ca.drawString(445,620,":-{}".format(Cost))
ca.drawString(50,595, "TAX on Items Purchased: _______________________________")
ca.drawString(445,595,":-{}".format(GST))
ca.drawString(50,570, "Total Price including GST(5%) on deal: _____________________________")
ca.drawString(445,570,":-Rs.{}/-".format(Final_Price))
ca.drawString(50,545, "Mode of Payment: _______________________________")
ca.drawString(445,545,":-{}".format(Payment_Method))
ca.drawString(50,520, "Your transaction id: _________________________________________")
ca.drawString(445,520,":-{}".format(tran))
ca.drawString(50,495, "Amount Paid by customer at counter: _______________________________")
ca.drawString(445,495,":-{}".format(csh))
ca.drawString(50,470, "Change returned to customer: _______________________________")
ca.drawString(445,470,":-{}".format(change))
ca.drawString(400,450,'Counter NO:')
ca.drawString(510,450,":-{}".format(counter))
ca.line(505,445,530,445)
ca.drawString(400,420,'Token No:')
ca.drawString(510,420,":-{}".format(token))
ca.line(505,415,530,415)
ca.drawString(50,390, "***\t\t\tPayment Successful!\t\t\t***")
ca.drawString(200,370, "******Have a Nice DAY!******")
ca.drawString(50,350, "************************************************************************************************")
ca.line(25,330,595,330)
ca.save()
t=str(t)
cur.execute("INSERT INTO Transac(customer,items,pay_method,date,transaction_id,total_price,amount_paid,change,reciept) VALUES(?,?,?,?,?,?,?,?,?)",(name,t,Payment_Method,d,tran,Final_Price,csh,change,p))
con.commit()
try:
msg = EmailMessage()
msg['Subject'] = 'Cafe Shop Invoice!!'
msg['From'] = '<EMAIL>'
msg['To'] = input('Enter your Address:\t')
msg.set_content('Invoice Attached......!!')
files = [p]
for file in files:
with open(file,'rb') as f:
file_data = f.read()
file_name = f.name
msg.add_attachment(file_data, maintype='application', subtype='octet-stream', filename=file_name)
break
with smtplib.SMTP_SSL('smtp.gmail.com',465) as smtp:
'''smtp.ehlo()
smtp.starttls()
smtp.ehlo()'''
smtp.login('<EMAIL>', '<PASSWORD>')
smtp.send_message(msg)
except Exception as e:
print(e)
'''cu=con.execute("SELECT * FROM Transac")
for i in cu: #Used for print data from database
print(i)'''
elif p=='no':
print('You saved paper!Well done!')
t=str(t)
cur.execute("INSERT INTO Transac(customer,items,pay_method,date,transaction_id,total_price,amount_paid,change,reciept) VALUES(?,?,?,?,?,?,?,?,?)",(name,t,Payment_Method,d,tran,Final_Price,csh,change,p))
con.commit()
'''cu=con.execute("SELECT * FROM Transac")
for i in cu:
print(i)'''
break
else:
print("No choice!")
t=str(t)
cur.execute("INSERT INTO Transac(customer,items,pay_method,date,transaction_id,total_price,amount_paid,change,reciept) VALUES(?,?,?,?,?,?,?,?,?)",(name,t,Payment_Method,d,tran,Final_Price,csh,change,p))
con.commit()
'''cu=con.execute("SELECT * FROM Transac")
for i in cu:
print(i)'''
break
print("\tPayment Successful!\t\nEnjoy your Meal, {0}".format(name))
break
break
elif Process=='CANCEL':
d=dt.datetime.now()
print(d)
print("\tPayment UnSuccessful!\t\nPlease Pay to complete your order, {0}".format(name))
break
else:
d=dt.datetime.now()
print(d)
print("!Sorry! This Payment Unsuccessful!\tPlease Pay !")
continue
elif Payment_Method=='COFFEE BUCKS':
print("Overall Charges with GST(5%) included: ",Final_Price,"Rs/-")
Process=input("For Payment Press (Proceed) or (Cancel)")
Process=Process.upper().strip()
if Process=='PROCEED':
try:
counter=randint(1,6)
token=randint(1,30)
csh=int(input('Enter the amount to pay! '))
if csh<Final_Price:
print("Amount is less than payable!!\n")
break
else:
change=csh-Final_Price
change=int(change)
time.sleep(1.5)
code='ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
tran=''
for c in range(11):
tran+=random.choice(code)
print('\n\tYour transaction id: ',tran)
print('\n\tPlease collect the change: ',change)
print('\n\nYour token no is: {}, & Please visit Counter No.: {}'.format(token,counter))
time.sleep(1.5)
d=dt.datetime.now()
print(d)
p=input('\nDo you want to print a reciept?[Yes/No]')
p=p.lower()
except Exception as e:
print(e)
break
if p=='yes':
p=f'Cafe_{tran}.pdf'
ca=canvas.Canvas(f'Cafe_{tran}.pdf',pagesize=letter)
ca.setLineWidth(.3)
ca.setFont('Helvetica', 11)
ca.line(25,790,595,790)
ca.line(25,790,25,330)
ca.line(595,790,595,330)
ca.drawString(435,750,"{}".format(d))
ca.line(435,747,590,747)
ca.drawString(30,750,'|_|CAFE SHOP!!')
ca.drawString(30,735,'OF BREAK-ZONE INDUSTRIES')
ca.drawString(30,705,'RECEIVED BY:')
ca.drawString(120,705,":-{}".format(name))
ca.line(120,700,580,700)
ca.drawString(200,680, "***\t\t\tCash Memo\t\t\t***")
ca.drawString(50,650, "Items Purchased: {}".format(t))
ca.line(140,645,590,645)
ca.drawString(50,620, "Price of Items Purchased: ____________________________________")
ca.drawString(445,620,":-{}".format(Cost))
ca.drawString(50,595, "TAX on Items Purchased: _________________________________")
ca.drawString(445,595,":-{}".format(GST))
ca.drawString(50,570, "Total Price including GST(5%) on deal: ___________________________")
ca.drawString(445,570,":-Rs.{}/-".format(Final_Price))
ca.drawString(50,545, "Mode of Payment: ________________________________")
ca.drawString(445,545,":-{}".format(Payment_Method))
ca.drawString(50,520, "Your transaction id: _______________________________________")
ca.drawString(445,520,":-{}".format(tran))
ca.drawString(50,495, "Amount Paid by customer | |
# gh_stars: 0
import weakref
from lxml import etree as etree_
import urllib
from sdc11073 import namespaces
from .. import loghelper
from ..namespaces import msgTag, domTag, QN_TYPE, nsmap, DocNamespaceHelper
from ..namespaces import Prefix_Namespace as Prefix
from ..pysoap.soapenvelope import Soap12Envelope, WsAddress, GenericNode, ExtendedDocumentInvalid
from ..safety import SafetyInfoHeader
class HostedServiceClient(object):
    """ Base class of clients that call hosted services of a dpws device."""
    VALIDATE_MEX = False # workaround as long as validation error due to missing dpws schema is not solved
    subscribeable_actions = tuple()  # overridden by subclasses: method names that can be subscribed to
    def __init__(self, soapClient, dpws_hosted, porttype, validate, sdc_definitions, bicepsParser, log_prefix=''):
        '''
        @param soapClient: soap client used to post envelopes to the hosted service
        @param dpws_hosted: a dpws "Hosted" object; its first endpoint reference becomes the target address
        @param porttype: name of the port type of this hosted service
        @param validate: if True, envelope bodies are validated against the biceps schema
        @param sdc_definitions: provides the action strings / namespaces for this SDC version
        @param bicepsParser: provides the schema objects used for validation
        @param log_prefix: prefix used for the logger name
        '''
        self.endpoint_reference = dpws_hosted.endpointReferences[0]
        self._url = urllib.parse.urlparse(self.endpoint_reference.address)
        self.porttype = porttype
        self._logger = loghelper.getLoggerAdapter('sdc.client.{}'.format(porttype), log_prefix)
        self._operationsManager = None
        self._validate = validate
        self._sdc_definitions = sdc_definitions
        self._bicepsParser = bicepsParser
        self.soapClient = soapClient
        self.log_prefix = log_prefix
        self._mdib_wref = None  # weak reference to the mdib, set via register_mdib
        self.predefined_actions = {} # calculated actions for subscriptions
        for s in self.subscribeable_actions:
            self.predefined_actions[s] = self._getActionString(s)
    @property
    def _bmmSchema(self):
        # biceps message model schema, or None when validation is disabled
        return None if not self._validate else self._bicepsParser.bmmSchema
    @property
    def _mexSchema(self):
        # metadata exchange schema, or None when validation is disabled
        return None if not self._validate else self._bicepsParser.mexSchema
    def register_mdib(self, mdib):
        ''' Client sometimes must know the mdib data (e.g. Set service, activate method).'''
        if mdib is not None and self._mdib_wref is not None:
            raise RuntimeError('Client "{}" has already an registered mdib'.format(self.porttype))
        # keep only a weak reference so this client does not keep the mdib alive
        self._mdib_wref = None if mdib is None else weakref.ref(mdib)
    def setOperationsManager(self, operationsManager):
        # the operations manager dispatches operation calls and tracks their results
        self._operationsManager = operationsManager
    def _callOperation(self, soapEnvelope, request_manipulator=None):
        # delegate the call to the registered operations manager
        return self._operationsManager.callOperation(self, soapEnvelope, request_manipulator)
    def getSubscribableActions(self):
        """ action strings only predefined"""
        return self.predefined_actions.values()
    def _getActionString(self, methodName):
        # look up the action string for methodName in the sdc definitions
        actions_lookup = self._sdc_definitions.Actions
        try:
            return getattr(actions_lookup, methodName)
        except AttributeError: # fallback, if a definition is missing
            return '{}/{}/{}'.format(self._sdc_definitions.ActionsNamespace, self.porttype, methodName)
    def __repr__(self):
        return '{} "{}" endpoint = {}'.format(self.__class__.__name__, self.porttype, self.endpoint_reference)
    def postSoapEnvelope(self, soapEnvelope, msg, request_manipulator=None):
        # post the envelope to the path component of the hosted service endpoint
        return self.soapClient.postSoapEnvelopeTo(self._url.path, soapEnvelope, msg=msg, request_manipulator=request_manipulator)
    def _mkSetMethodSoapEnvelope(self, methodName, operationHandle, requestNodes, additionalNamespaces=None):
        ''' helper to create the soap envelope
        @param methodName: last element of name of the called action
        @param operationHandle: handle name as string
        @param requestNodes: a list of etree_ nodes that will become Subelement of Method name element
        @param additionalNamespaces: optional extra namespace prefixes to declare on the envelope
        '''
        soapBodyNode = etree_.Element( msgTag(methodName))
        ref = etree_.SubElement(soapBodyNode, msgTag('OperationHandleRef'), attrib={QN_TYPE: '{}:HandleRef'.format(Prefix.PM.prefix)}, nsmap=Prefix.partialMap(Prefix.PM))
        ref.text = operationHandle
        for n in requestNodes:
            soapBodyNode.append(n)
        if additionalNamespaces:
            my_ns = Prefix.partialMap(Prefix.S12, Prefix.WSA, Prefix.PM, Prefix.MSG, *additionalNamespaces)
        else:
            my_ns = Prefix.partialMap(Prefix.S12, Prefix.WSA, Prefix.PM, Prefix.MSG)
        sih = self._mkOptionalSafetyHeader(soapBodyNode, operationHandle) # a header or None
        soapEnvelope = Soap12Envelope(my_ns)
        action = self._getActionString(methodName)
        soapEnvelope.setAddress(WsAddress(action=action, to=self.endpoint_reference.address))
        if sih is not None:
            soapEnvelope.addHeaderObject(sih)
        soapEnvelope.addBodyElement(soapBodyNode)
        # validate against the message model schema (no-op when validation is off)
        soapEnvelope.validateBody(self._bmmSchema)
        return soapEnvelope
    def _mkGetMethodEnvelope(self, method, params = None):
        # build a soap envelope for a simple "get" style method call
        action = self._getActionString(method)
        bodyNode = etree_.Element(msgTag(method))
        soapEnvelope = Soap12Envelope(Prefix.partialMap(Prefix.S12, Prefix.WSA, Prefix.MSG))
        soapEnvelope.setAddress(WsAddress(action=action,
                                          to=self.endpoint_reference.address))
        if params:
            for p in params:
                bodyNode.append(p)
        soapEnvelope.addBodyObject(GenericNode(bodyNode))
        return soapEnvelope
    def _callGetMethod(self, method, params = None, request_manipulator=None):
        # build, validate and post a "get" call; response validation is best
        # effort only - the returned envelope is handed back even if invalid
        self._logger.info('calling {} on {}:{}', method, self._url.netloc, self._url.path)
        soapEnvelope = self._mkGetMethodEnvelope(method, params)
        soapEnvelope.validateBody(self._bmmSchema)
        returnedEnvelope = self.postSoapEnvelope(soapEnvelope, msg='get {}'.format(method),
                                                 request_manipulator=request_manipulator)
        try:
            returnedEnvelope.validateBody(self._bmmSchema)
        except ExtendedDocumentInvalid as ex:
            self._logger.error('Validation error: {}', ex)
        except TypeError as ex:
            self._logger.error('Could not validate Body, Type Error :{}', ex)
        except Exception as ex:
            self._logger.error('Validation error: "{}" msgNode={}', ex, returnedEnvelope.msgNode)
        return returnedEnvelope
    def _mkSoapEnvelope(self, methodName, xmlBodyString=None, additionalHeaders=None):
        # build an envelope for methodName with an optional raw xml body string
        action = self._getActionString(methodName)
        soapEnvelope = Soap12Envelope(Prefix.partialMap(Prefix.S12, Prefix.MSG, Prefix.WSA))
        soapEnvelope.setAddress(WsAddress(action=action, to=self.endpoint_reference.address))
        if additionalHeaders is not None:
            for h in additionalHeaders:
                soapEnvelope.addHeaderObject(h)
        if xmlBodyString is not None:
            soapEnvelope.addBodyString(xmlBodyString)
        return soapEnvelope
    def _mkSoapEnvelopeWithEtreeBody(self, methodName, etreeBody=None, additionalHeaders=None):
        # serialize the etree body and delegate to _mkSoapEnvelope
        tmp = etree_.tostring(etreeBody)
        return self._mkSoapEnvelope(methodName, tmp, additionalHeaders)
    def _callMethodWithXMLStringArgument(self, portTypeName, methodName, xmlStringArgument=None, additionalHeaders=None):
        # build, validate, post, and validate the response of a method call
        soapEnvelope = self._mkSoapEnvelope(methodName, xmlStringArgument, additionalHeaders)
        soapEnvelope.validateBody(self._bmmSchema)
        retEnvelope = self.postSoapEnvelope(soapEnvelope, msg='port {} method {}'.format(portTypeName, methodName))
        retEnvelope.validateBody(self._bmmSchema)
        return retEnvelope
    def _callMethodWithEtreeNodeArgument(self, portTypeName, methodName, etreeNodeArgument=None, additionalHeaders=None):
        # serialize the etree argument and delegate to the string based variant
        tmp = etree_.tostring(etreeNodeArgument)
        return self._callMethodWithXMLStringArgument(portTypeName, methodName, tmp, additionalHeaders)
    def _mkOptionalSafetyHeader(self, soapBodyNode, operationHandle):
        # return a SafetyInfoHeader when the operation descriptor declares
        # safety requirements and an mdib is registered, otherwise None
        if self._mdib_wref is not None:
            op_descriptor = self._mdib_wref().descriptions.handle.getOne(operationHandle, allowNone=True)
            if op_descriptor is not None and op_descriptor.SafetyReq is not None:
                mdib_node = self._mdib_wref().reconstructMdibWithContextStates()
                return self._mkSoapSafetyHeader(soapBodyNode, op_descriptor.SafetyReq, mdib_node)
        return None
    def _mkSoapSafetyHeader(self, soapBodyNode, t_SafetyReq, mdibNode):
        # evaluate the dual channel selectors (against the soap body) and the
        # safety context selectors (against the mdib) - both are xpath
        # expressions - and build a SafetyInfoHeader from the resulting values
        dualChannelSelectors = {}
        safetyContextSelectors = {}
        if not t_SafetyReq.DualChannelDef:
            self._logger.info('no DualChannel selectors specified')
        else:
            for sel in t_SafetyReq.DualChannelDef.Selector:
                selectorId = sel.Id
                selectorPath = sel.text
                values = soapBodyNode.xpath(selectorPath, namespaces=mdibNode.nsmap)
                if len(values) == 1:
                    self._logger.debug('DualChannel selector "{}": value = "{}", path= "{}"', selectorId, values[0], selectorPath)
                    dualChannelSelectors[selectorId] = str(values[0]).strip()
                elif len(values) == 0:
                    self._logger.error('DualChannel selector "{}": no value found! path= "{}"', selectorId, selectorPath)
                else:
                    self._logger.error('DualChannel selector "{}": path= "{}", multiple values found: {}', selectorId, selectorPath, values)
        if not t_SafetyReq.SafetyContextDef:
            self._logger.info('no Safety selectors specified')
        else:
            for sel in t_SafetyReq.SafetyContextDef.Selector:
                selectorId = sel.Id
                selectorPath = sel.text
                # check the selector, there is a potential problem with the starting point of the xpath search path:
                if selectorPath.startswith('//'):
                    # double slashes means that the matching pattern can be located anywhere in the dom tree.
                    # No problem.
                    pass #
                elif selectorPath.startswith('/'):
                    # Problem! if the selector starts with a single slash, this is a xpath search that starts at the document root.
                    # But the convention is that the xpath search shall start from the top level element (=> without the toplevel element in the path)
                    # In order to follow this convention, remove the leading slash and start the search relative to the top level node.
                    selectorPath = selectorPath[1:]
                values = mdibNode.xpath(selectorPath, namespaces=mdibNode.nsmap)
                if len(values) == 1:
                    self._logger.debug('Safety selector "{}": value = "{}" path= "{}"', selectorId, values[0], selectorPath)
                    safetyContextSelectors[selectorId] = str(values[0]).strip()
                elif len(values) == 0:
                    self._logger.error('Safety selector "{}": no value found! path= "{}"', selectorId, selectorPath)
                else:
                    self._logger.error('Safety selector "{}": path= "{}", multiple values found: {}', selectorId, selectorPath, values)
        if dualChannelSelectors or safetyContextSelectors:
            return SafetyInfoHeader(dualChannelSelectors, safetyContextSelectors)
        else:
            return None
class GetServiceClient(HostedServiceClient):
    """Client for the BICEPS Get service (GetMdib / GetMdDescription / GetMdState)."""
    @staticmethod
    def _mkHandleRefParams(requestedHandles):
        """Build the msg:HandleRef request nodes for *requestedHandles*.

        @param requestedHandles: None (request everything) or an iterable of handle strings
        @return: list of etree elements (empty when requestedHandles is None)
        """
        requestparams = []
        if requestedHandles is not None:
            for h in requestedHandles:
                node = etree_.Element(msgTag('HandleRef'))
                node.text = h
                requestparams.append(node)
        return requestparams
    def getMdDescriptionNode(self, requestedHandles=None, request_manipulator=None):
        """Call GetMdDescription and return the message node of the response.

        @param requestedHandles: None if all descriptors shall be requested, otherwise a list of handles
        """
        resultSoapEnvelope = self._callGetMethod('GetMdDescription',
                                                 params=self._mkHandleRefParams(requestedHandles),
                                                 request_manipulator=request_manipulator)
        return resultSoapEnvelope.msgNode
    def getMdib(self, request_manipulator=None):
        """Call GetMdib and return the full response envelope."""
        return self._callGetMethod('GetMdib', request_manipulator=request_manipulator)
    def getMdibNode(self, request_manipulator=None):
        """Call GetMdib and return only the message node of the response."""
        return self.getMdib(request_manipulator=request_manipulator).msgNode
    def getMdState(self, requestedHandles=None, request_manipulator=None):
        """Call GetMdState and return the full response envelope.

        @param requestedHandles: None if all states shall be requested, otherwise a list of handles
        """
        return self._callGetMethod('GetMdState',
                                   params=self._mkHandleRefParams(requestedHandles),
                                   request_manipulator=request_manipulator)
    def getMdStateNode(self, requestedHandles=None, request_manipulator=None):
        """Call GetMdState and return only the message node of the response.

        @param requestedHandles: None if all states shall be requested, otherwise a list of handles
        """
        return self.getMdState(requestedHandles, request_manipulator=request_manipulator).msgNode
class SetServiceClient(HostedServiceClient):
subscribeable_actions = ('OperationInvokedReport',)
def setNumericValue(self, operationHandle, requestedNumericValue, request_manipulator=None):
""" call SetNumericValue Method of device
@param operationHandle: a string
@param requestedNumericValue: int or float or a string representing a decimal number
@return a Future object
"""
self._logger.info('setNumericValue operationHandle={} requestedNumericValue={}',
operationHandle, requestedNumericValue)
soapEnvelope = self._mkRequestedNumericValueEnvelope(operationHandle, requestedNumericValue)
return self._callOperation(soapEnvelope, request_manipulator=request_manipulator)
def setString(self, operationHandle, requestedString, request_manipulator=None):
""" call SetString Method of device
@param operationHandle: a string
@param requestedString: a string
@return a Future object
"""
self._logger.info('setString operationHandle={} requestedString={}',
operationHandle, requestedString)
soapEnvelope = self._mkRequestedStringEnvelope(operationHandle, requestedString)
return self._callOperation(soapEnvelope, request_manipulator=request_manipulator)
def setAlertState(self, operationHandle, proposedAlertState, request_manipulator=None):
"""The SetAlertState method corresponds to the SetAlertStateOperation objects in the MDIB and allows the modification of an alert.
It can handle a single proposed AlertState as argument (only for backwards compatibility) and a list of them.
@param operationHandle: handle name as string
@param proposedAlertState: domainmodel.AbstractAlertState instance or a list of them
"""
self._logger.info('setAlertState operationHandle={} requestedAlertState={}',
operationHandle, proposedAlertState)
if hasattr(proposedAlertState, 'NODETYPE'):
# this is a state container. make it a list
proposedAlertState = [proposedAlertState]
soapEnvelope = self._mkSetAlertEnvelope(operationHandle, proposedAlertState)
return self._callOperation(soapEnvelope, request_manipulator=request_manipulator)
def setMetricState(self, operationHandle, proposedMetricStates, request_manipulator=None):
"""The SetMetricState method corresponds to the SetMetricStateOperation objects in the MDIB and allows the modification of metric states.
@param operationHandle: handle name as string
@param proposedMetricStates: a list of domainmodel.AbstractMetricState instance or derived class
"""
self._logger.info('setMetricState operationHandle={} requestedMetricState={}',
operationHandle, proposedMetricStates)
soapEnvelope = self._mkSetMetricStateEnvelope(operationHandle, proposedMetricStates)
return self._callOperation(soapEnvelope, request_manipulator=request_manipulator)
def activate(self, operationHandle, value, request_manipulator=None):
""" an activate call does not return the result of the operation directly. Instead you get an transaction id,
and will receive the status of this transaction as notification ("OperationInvokedReport").
This method returns a "future" object. The future object has a result as soon as a final transaction state is received.
@param operationHandle: a string
@param value: a string
@return: a concurrent.futures.Future object
"""
# make message body
self._logger.info('activate handle={} value={}', operationHandle, value)
soapBodyNode = etree_.Element(msgTag('Activate'), | |
<gh_stars>0
"""Objects and functions for working with probability distributions and
related properties.
Internally, we often deal with the logarithm of the probability
distribution along a path of interest instead of the free energy
differences, which differ only by a minus sign.
In gchybrid, we refer to the logarithm of the order parameter
distribution as lnpi_op.dat, the logarithm of the growth expanded
ensemble distribution as lnpi_tr.dat, the logarithm of the exchange path
distribution as lnpi_ex.dat, and the logarithm of the regrowth path
distribution as lnpi_rg.dat. Similarly, the frequency distribution
along each path is contained in the file hits_*.dat, where
the * matches the appropriate two-letter suffix.
"""
import copy
import os.path
import numpy as np
class TransitionMatrix(object):
    """A base class for an acceptance probability matrix along a
    specified path.

    Attributes:
        index: A numpy array or dict describing the states in the
            matrix.
        fw_atts: An array with the number of forward transition attempts
            for each state.
        rev_atts: An array with the number of reverse transition
            attempts for each state.
        fw_probs: An array with the acceptance probability for forward
            transitions from each state.
        rev_probs: An array with the acceptance probability for reverse
            transitions from each state.
    """

    def __init__(self, index, fw_atts, rev_atts, fw_probs, rev_probs):
        self.index = index
        self.fw_atts = fw_atts
        self.rev_atts = rev_atts
        self.fw_probs = fw_probs
        self.rev_probs = rev_probs

    def __len__(self):
        return len(self.fw_atts)

    def get_poorly_sampled_attempts(self, cutoff):
        """Determine which subensemble/molecule/growth stage
        combinations are not adequately sampled.

        For each adjacent pair of states we take the minimum of the
        forward attempts out of the lower state and the reverse attempts
        out of the upper state.  Pairs sampled less than ``cutoff``
        times the mean of those minima are flagged.  The final state has
        no forward transition and is always flagged.

        Args:
            cutoff: The fraction of the mean to use as a threshold for
                sampling quality.

        Returns:
            A boolean numpy array, where True denotes states which
            don't meet the sampling quality threshold.
        """
        paired_attempts = np.minimum(self.fw_atts[:-1], self.rev_atts[1:])
        threshold = cutoff * np.mean(paired_attempts)
        # Append True for the last state, which is always dropped.
        return np.append(paired_attempts < threshold, True)
class OrderParamTransitionMatrix(TransitionMatrix):
    """An acceptance probability matrix along the order parameter path.

    Attributes:
        index: A numpy array with the order parameter values.
        fw_atts: An array with the number of forward transition attempts
            for each state.
        rev_atts: An array with the number of reverse transition
            attempts for each state.
        fw_probs: An array with the acceptance probability for forward
            transitions from each state.
        rev_probs: An array with the acceptance probability for reverse
            transitions from each state.
    """

    def __init__(self, index, fw_atts, rev_atts, fw_probs, rev_probs):
        super().__init__(index, fw_atts, rev_atts, fw_probs, rev_probs)

    def calculate_lnpi_op(self, guess, min_attempts=1):
        """Calculate the free energy of the order parameter path.

        Args:
            guess: A numpy array or OrderParamDistribution with an
                initial guess for the free energy.
            min_attempts: The minimum number of transitions in each
                direction required to consider the transition matrix
                when updating the free energy estimate.

        Returns:
            An OrderParamDistribution.
        """
        lnpi = np.zeros(len(self))
        for i, increment in enumerate(np.diff(guess)):
            forward = self.fw_probs[i]
            reverse = self.rev_probs[i + 1]
            well_sampled = (self.fw_atts[i] > min_attempts and
                            self.rev_atts[i + 1] > min_attempts)
            # Use the measured acceptance ratio when both directions are
            # adequately sampled and nonzero; otherwise rely on the
            # initial guess alone for this step.
            if well_sampled and forward > 0.0 and reverse > 0.0:
                lnpi[i + 1] = lnpi[i] + increment + np.log(forward / reverse)
            else:
                lnpi[i + 1] = lnpi[i] + increment
        return OrderParamDistribution(index=self.index, log_probs=lnpi)

    def write(self, path):
        """Write the transition matrix to a file.

        Args:
            path: The location of the file to write.
        """
        columns = np.column_stack((self.index, self.rev_atts, self.fw_atts,
                                   self.rev_probs, self.fw_probs))
        np.savetxt(path, columns, fmt=3 * ['%8d'] + 2 * ['%.11e'],
                   delimiter=' ')
class TransferTransitionMatrix(TransitionMatrix):
    """An acceptance probability matrix along the molecule transfer
    path.

    Attributes:
        index: A dict with the overall number, molecule, subensemble,
            and growth stage of each state in the path.
        fw_atts: An array with the number of forward transition attempts
            for each state.
        rev_atts: An array with the number of reverse transition
            attempts for each state.
        fw_probs: An array with the acceptance probability for forward
            transitions from each state.
        rev_probs: An array with the acceptance probability for reverse
            transitions from each state.
    """

    def __init__(self, index, fw_atts, rev_atts, fw_probs, rev_probs):
        super().__init__(index, fw_atts, rev_atts, fw_probs, rev_probs)

    def calculate_lnpi_tr(self, guess, min_attempts=1):
        """Calculate the free energy of the molecule transfer path.

        Args:
            guess: A numpy array or TransferDistribution with an
                initial guess for the free energy along the molecule
                transfer path.
            min_attempts: The minimum number of transitions in each
                direction required to consider the transition matrix
                when updating the free energy estimate.

        Returns:
            A TransferDistribution.
        """
        dist = np.zeros(len(self))
        ind = self.index
        mol, sub, stages = ind['molecules'], ind['subensembles'], ind['stages']
        for m in np.unique(mol):
            for s in np.unique(sub):
                sel = (mol == m) & (sub == s)
                # A single stage means there is no growth path for this
                # molecule/subensemble pair, so nothing to integrate.
                if len(stages[sel]) == 1:
                    continue
                # Walk the growth stages from last to first, anchoring
                # each stage's free energy to the next stage's value.
                for g in stages[sel][-2::-1]:
                    cs = sel & (stages == g)
                    ns = sel & (stages == g + 1)
                    dist[cs] = dist[ns] + guess[cs] - guess[ns]
                    # Only trust the acceptance ratio when both
                    # directions are adequately sampled and nonzero.
                    if (self.fw_atts[cs] > min_attempts and
                            self.rev_atts[ns] > min_attempts and
                            self.fw_probs[cs] > 0.0 and
                            self.rev_probs[ns] > 0.0):
                        dist[cs] -= np.log(self.fw_probs[cs] /
                                           self.rev_probs[ns])
        return TransferDistribution(index=ind, log_probs=dist)

    def calculate_lnpi_op(self, tr_guess, op_guess, species=1, min_attempts=1):
        """Calculate the free energy of the order parameter path using
        the transfer path of the order parameter species.

        This method is only applicable for direct simulations.

        Args:
            tr_guess: A numpy array or TransferDistribution with an
                initial guess for the free energy along the molecule
                transfer path.
            op_guess: A numpy array or OrderParamDistribution with an
                initial guess for the free energy along the order
                parameter path.
            species: The order parameter species.
            min_attempts: The minimum number of transitions in each
                direction required to consider the transition matrix
                when updating the free energy estimate.

        Returns:
            An OrderParamDistribution.
        """
        ind = self.index
        mol, sub, stages = ind['molecules'], ind['subensembles'], ind['stages']
        uniq_sub = np.unique(sub)
        dist = np.zeros(len(uniq_sub))
        lnpi_tr = self.calculate_lnpi_tr(tr_guess)
        for i in uniq_sub[1:]:
            sampled = True
            # fs: fully grown state in the previous subensemble;
            # rs: first growth stage in the current subensemble.
            fsub = (mol == species) & (sub == i - 1)
            rsub = (mol == species) & (sub == i)
            fs = fsub & (stages == np.amax(stages[fsub]))
            rs = rsub & (stages == np.amin(stages[rsub]))
            diff = tr_guess[rs] - tr_guess[fs]
            if (self.fw_atts[fs] > min_attempts and
                    self.rev_atts[rs] > min_attempts and
                    self.fw_probs[fs] > 0.0 and
                    self.rev_probs[rs] > 0.0):
                diff += np.log(self.fw_probs[fs] / self.rev_probs[rs])
            else:
                sampled = False
            # Accumulate the growth-stage contributions within the
            # current subensemble.
            for m in stages[rsub][1:]:
                lm = rsub & (stages == m - 1)
                cm = rsub & (stages == m)
                if (self.fw_atts[lm] > min_attempts and
                        self.rev_atts[cm] > min_attempts and sampled):
                    diff += lnpi_tr[cm] - lnpi_tr[lm]
                else:
                    sampled = False
            if sampled:
                dist[i] = dist[i - 1] + diff
            else:
                # Fall back on the initial guess when the path between
                # subensembles is not adequately sampled.
                dist[i] = dist[i - 1] + op_guess[i] - op_guess[i - 1]
        return OrderParamDistribution(index=uniq_sub, log_probs=dist)

    def write(self, path):
        """Write the transition matrix to a file.

        Args:
            path: The location of the file to write.
        """
        ind = self.index
        # _read_tr_index/read_pacc produce the key 'numbers'; accept the
        # legacy singular spelling as a fallback so matrices built either
        # way can be written (previously this raised KeyError on matrices
        # returned by read_pacc).
        numbers = ind['numbers'] if 'numbers' in ind else ind['number']
        fmt = 6 * ['%8d'] + 2 * ['%.11e']
        arr = np.column_stack((
            numbers, ind['subensembles'], ind['molecules'],
            ind['stages'], self.rev_atts, self.fw_atts, self.rev_probs,
            self.fw_probs))
        np.savetxt(path, arr, fmt=fmt, delimiter=' ')
def _read_tr_index(path):
num, sub, mol, stg = np.loadtxt(path, usecols=(0, 1, 2, 3),
dtype='int', unpack=True)
return {'numbers': num, 'subensembles': sub, 'molecules': mol,
'stages': stg}
def read_pacc(path):
    """Read a pacc_op_*.dat file or a pacc_tr_*.dat file.

    Args:
        path: The location of the file to read.

    Returns:
        A TransitionMatrix object.

    Raises:
        NotImplementedError: If the file name matches neither path type.
    """
    name = os.path.basename(path)
    # Check 'tr' before 'op': the suffix decides which matrix type to build.
    if 'tr' in name:
        rev_atts, fw_atts, rev_probs, fw_probs = np.loadtxt(
            path, usecols=(4, 5, 6, 7), unpack=True)
        return TransferTransitionMatrix(
            _read_tr_index(path), fw_atts=fw_atts.astype('int'),
            rev_atts=rev_atts.astype('int'), fw_probs=fw_probs,
            rev_probs=rev_probs)
    if 'op' in name:
        index, rev_atts, fw_atts, rev_probs, fw_probs = np.loadtxt(
            path, usecols=(0, 1, 2, 3, 4), unpack=True)
        return OrderParamTransitionMatrix(
            index.astype('int'), fw_atts=fw_atts.astype('int'),
            rev_atts=rev_atts.astype('int'), fw_probs=fw_probs,
            rev_probs=rev_probs)
    raise NotImplementedError
def combine_matrices(matrices):
"""Combine a set of transition matrices.
Args:
matrices: A list of TransitionMatrix-like objects to combine.
Returns:
An instance of an appropriate subclass of TransitionMatrix with
the combined data.
"""
index = matrices[0].index
fw_atts = sum(m.fw_atts for m in matrices)
rev_atts = sum(m.rev_atts for m in matrices)
fw_probs = sum(m.fw_atts * m.fw_probs for m in matrices) / fw_atts
rev_probs = sum(m.rev_atts * m.rev_probs for m in matrices) / rev_atts
fw_probs, rev_probs = np.nan_to_num(fw_probs), np.nan_to_num(rev_probs)
| |
= user_data.get("name", "")
device["imageId"] = user_data.get("image", "")
try:
Image.objects.get(pk=device["imageId"])
except ObjectDoesNotExist:
raise Exception("Not all images are present!")
logger.debug(json_object)
device["ram"] = user_data.get("ram", 1024)
device["cpu"] = user_data.get("cpu", 1)
device["interfacePrefix"] = user_data.get("interfacePrefix", "")
device["configurationFile"] = user_data.get("configurationFile", "")
try:
device["slot_offset"] = int(user_data.get("pciSlotOffset", 3))
except ValueError:
logger.warn("Could not parse int from pciSlotOffset")
device["slot_offset"] = 3
device["interfaceType"] = user_data.get("interfaceType", "")
device["smbiosProduct"] = user_data.get("smbiosProductString", "")
device["smbiosManufacturer"] = user_data.get("smbiosManufacturer", "")
device["smbiosVersion"] = user_data.get("smbiosVersion", "")
device["secondaryDiskParams"] = user_data.get("secondaryDiskParams", [])
device["tertiaryDiskParams"] = user_data.get("tertiaryDiskParams", [])
device["managementInterface"] = user_data.get("mgmtInterface", "")
try:
device["resizeImage"] = int(user_data.get("resize", 0))
except ValueError:
logger.warn("couldn't parse int from resizeImage value!")
device["resizeImage"] = 0
device["ip"] = user_data.get("ip", "")
device["type"] = user_data.get("type", "")
device["user"] = "root"
if "user" in user_data:
device["user"] = user_data.get("user", "")
device["password"] = user_data.get("password", "")
device["companionInterfaceMirror"] = user_data.get("companionInterfaceMirror", "")
device["companionInterfaceMirrorOffset"] = user_data.get("companionInterfaceMirrorOffset", "")
device["mirroredInterfaces"] = []
device["configScriptId"] = 0
device["configScriptParam"] = 0
if "cloudInitSupport" in user_data:
device["cloudInitSupport"] = user_data.get("cloudInitSupport", False)
device["configDriveSupport"] = False
if "configDriveSupport" in user_data:
device["configDriveSupport"] = user_data.get("configDriveSupport", "")
if "configDriveParams" in user_data:
device["configDriveParams"] = user_data.get("configDriveParams", "")
device["configDriveParamsFile"] = user_data.get("configDriveParamsFile", "")
else:
device["configDriveParams"] = list()
if "configScriptId" in user_data:
logger.debug("Found a configScript to use!")
device["configScriptId"] = user_data.get("configScriptId", "")
device["configScriptParam"] = user_data.get("configScriptParam", "")
device["uuid"] = json_object.get('id', '')
device["interfaces"] = []
device['vncPort'] = 0
if configuration.deployment_backend == "kvm":
# determine next available VNC port that has not currently been assigned
next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index)
# verify that this port is not actually in use by another process
while osUtils.check_port_in_use(next_vnc_port):
device_index += 1
next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index)
device["vncPort"] = next_vnc_port
# is this a child VM?
# children will *always* have a parent attribute set in their userdata
parent_id = user_data.get("parent", "")
logger.debug("Found parent_id of: %s for device: %s" % (parent_id, device["name"]))
if parent_id == "":
logger.debug("setting isChild to False")
device["isChild"] = False
else:
logger.debug("setting isChild to True")
device["isChild"] = True
# use chassis name as the naming convention for all the bridges
# we'll create networks as 'topology_id + _ + chassis_name + function
# i.e. t1_vmx01_c and t1_vmx01_c
chassis_name = user_data.get("name", "")
if "parentName" in user_data:
chassis_name = user_data.get("parentName", "")
if "parent" in user_data:
device["parent"] = user_data.get("parent", "")
logger.debug("Using chassis name of: %s" % chassis_name)
if chassis_name in chassis_name_to_index:
chassis_id = chassis_name_to_index[chassis_name]
else:
chassis_id = device_index
chassis_name_to_index[chassis_name] = chassis_id
# set this property for use later, we'll loop again after we have configured all the connections
# to create the management interface at the end (i.e. for Linux hosts)
device["mgmtInterfaceIndex"] = user_data.get("mgmtInterfaceIndex", 0)
# now let's create the interfaces declared so far
if device["mgmtInterfaceIndex"] != -1:
device_interface_wiring = dict()
# setup management interface
# management interface mi will always be connected to default management network (virbr0 on KVM)
mi = dict()
# slight optimization for kvm backend, dont generate new mac
if configuration.deployment_backend == "kvm" and \
is_deployed and \
libvirtUtils.domain_exists(device['name']):
mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name'])
else:
mi['mac'] = generate_next_mac(topology_id)
mi["bridge"] = "virbr0"
mi["type"] = user_data.get("mgmtInterfaceType", "virtio")
mi["bridge_preexists"] = True
device_interface_wiring[device["mgmtInterfaceIndex"]] = mi
for dummy in user_data.get("dummyInterfaceList", []):
dm = dict()
dm["mac"] = generate_next_mac(topology_id)
dm["bridge"] = "t%s_d" % str(topology_id)
dm["type"] = user_data.get("mgmtInterfaceType", "virtio")
dm["bridge_preexists"] = False
device_interface_wiring[dummy] = dm
for companion in user_data.get("companionInterfaceList", []):
cm = dict()
cm["mac"] = generate_next_mac(topology_id)
cm["bridge"] = "t%s_%s_c" % (str(topology_id), chassis_id)
cm["type"] = user_data.get("interfaceType", "virtio")
cm["bridge_preexists"] = False
device_interface_wiring[companion] = cm
# we do have management interfaces first, so let's go ahead and add them to the device
# THIS ASSUMES THE JSON CONFIGURATION IS VALID! I.E. all interface indexes are accounted for
# 0, 1, 2, 3 etc.
interfaces = device_interface_wiring.keys()
interfaces.sort()
for interface in interfaces:
interface_config = device_interface_wiring[interface]
interface_config["slot"] = "%#04x" % int(len(device["interfaces"]) + device["slot_offset"])
device["interfaces"].append(interface_config)
# let's check if we've already set this bridge to be created
found = False
for network in networks:
if network["name"] == interface_config["bridge"]:
found = True
break
# let's go ahead and add this to the networks list if needed
if not found and interface_config["bridge"] != "virbr0":
nn = dict()
nn["name"] = interface_config["bridge"]
nn["mac"] = generate_next_mac(topology_id)
networks.append(nn)
device_index += 1
devices.append(device)
# this object is not a VM, let's check if it's a cloud/bridge object
elif json_object["type"] == "draw2d.shape.node.externalCloud":
if json_object["userData"]["label"] == "External":
# is this an old topology? manually fix here!
external_bridges[json_object["id"]] = "br0"
else:
# track all external bridges here for later use
external_bridges[json_object["id"]] = json_object["userData"]["label"]
elif json_object["type"] == "draw2d.shape.node.internalCloud":
# track all internal bridges as well
internal_bridges.append(json_object["id"])
conn_index = 1
for json_object in json_data:
if json_object["type"] == "draw2d.Connection":
target_uuid = json_object["target"]["node"]
source_uuid = json_object["source"]["node"]
# should we create a new bridge for this connection?
create_bridge = True
bridge_name = "t" + str(topology_id) + "_br" + str(conn_index)
for d in devices:
if d["uuid"] == source_uuid:
# slot should always start with 6 (or 5 for vmx phase 2/3)
slot = "%#04x" % int(len(d["interfaces"]) + device["slot_offset"])
interface = dict()
interface["mac"] = generate_next_mac(topology_id)
# does this bridge already exist? Possibly external bridge for example
# essentially same of create_bridge flag, but kept on the interface for later use in heat template
interface["bridge_preexists"] = False
if target_uuid in internal_bridges:
bridge_name = "t" + str(topology_id) + "_p_br" + str(internal_bridges.index(target_uuid))
interface["bridge"] = bridge_name
elif target_uuid in external_bridges.keys():
bridge_name = external_bridges[target_uuid]
interface["bridge"] = bridge_name
# do not create external bridges...
create_bridge = False
interface["bridge_preexists"] = True
else:
interface["bridge"] = bridge_name
interface["slot"] = slot
interface["name"] = device["interfacePrefix"] + str(len(d["interfaces"]))
interface["linkId"] = json_object["id"]
interface["type"] = device["interfaceType"]
d["interfaces"].append(interface)
# do we need to mirror interfaces up to the parent VM?
if d["companionInterfaceMirror"] and "parent" in d:
pci_slot_str = "%#04x" % int(len(d["interfaces"]) + d["companionInterfaceMirrorOffset"])
em = dict()
em["mac"] = generate_next_mac(topology_id)
em["bridge"] = bridge_name
em["slot"] = pci_slot_str
for dd in devices:
if dd["uuid"] == d["parent"]:
em["type"] = dd["interfaceType"]
dd["mirroredInterfaces"].append(em)
break
elif d["uuid"] == target_uuid:
# slot should always start with 6
slot = "%#04x" % int(len(d["interfaces"]) + device["slot_offset"])
interface = dict()
interface["mac"] = generate_next_mac(topology_id)
interface["bridge_preexists"] = False
if source_uuid in internal_bridges:
bridge_name = "t" + str(topology_id) + "_p_br" + str(internal_bridges.index(source_uuid))
interface["bridge"] = bridge_name
if source_uuid in external_bridges.keys():
bridge_name = external_bridges[source_uuid]
interface["bridge"] = bridge_name
create_bridge = False
# keep bridge existence information on the interface for use in heat template
interface["bridge_preexists"] = True
else:
interface["bridge"] = bridge_name
interface["slot"] = slot
interface["name"] = device["interfacePrefix"] + str(len(d["interfaces"]))
interface["linkId"] = json_object["id"]
interface["type"] = device["interfaceType"]
d["interfaces"].append(interface)
# do we need to mirror interfaces up to the parent VM?
if d["companionInterfaceMirror"] and "parent" in d:
pci_slot_str = "%#04x" % int(len(d["interfaces"]) + d["companionInterfaceMirrorOffset"])
em = dict()
em["mac"] = generate_next_mac(topology_id)
em["bridge"] = bridge_name
em["slot"] = pci_slot_str
for dd in devices:
if dd["uuid"] == d["parent"]:
em["type"] = dd["interfaceType"]
dd["mirroredInterfaces"].append(em)
break
# let's check to see if we've already marked this internal bridge for creation
for c in networks:
if c["name"] == bridge_name:
logger.debug("Skipping bridge creation for " + bridge_name)
create_bridge = False
continue
if create_bridge is True:
logger.debug("Setting " + bridge_name + " for creation")
connection = dict()
connection["name"] = bridge_name
connection["mac"] = generate_next_mac(topology_id)
networks.append(connection)
conn_index += 1
# now let's add a management interface if it's required
# if index == -1, then the desire is to put it last!
for d in devices:
if d["mgmtInterfaceIndex"] == -1:
mi = dict()
# if this has already been deployed, let's preserve the existing mac address that has been assigned
if configuration.deployment_backend == "kvm" and \
is_deployed and \
libvirtUtils.domain_exists(device['name']):
mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name'])
else:
mi['mac'] = generate_next_mac(topology_id)
mi["slot"] = "%#04x" % int(len(d["interfaces"]) + d["slot_offset"])
mi["bridge"] = "virbr0"
mi["type"] = user_data.get("mgmtInterfaceType", "virtio")
mi["bridge_preexists"] = True
d["interfaces"].append(mi)
topology_config = dict()
topology_config["networks"] = networks
topology_config["devices"] = devices
return topology_config
def clone_topology(topology_json):
"""
iterate through topology json and increment
all found management IPs to provide for some
small uniqueness protection. The right way to do this
would be to track all used management ips, but I would rather
each topology be a transient thing to be used and thrown away
:param topology_json: json string from Topology
:return: new topology_json with incremented management IPs
"""
try:
json_data = json.loads(topology_json)
except ValueError as ve:
| |
#!/usr/bin/env python3
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mcdm
import numpy as np
import unittest
class TestRank(unittest.TestCase):
def test_rank_default(self):
"""Test the rank function with the default parameters."""
x_matrix = [
[0.00, 1.00],
[0.25, 0.75],
[0.50, 0.50],
[0.75, 0.25],
[1.00, 0.00],
]
obtained_ranking = mcdm.rank(x_matrix)
expected_ranking = [
("a1", 0.500000),
("a2", 0.500000),
("a3", 0.500000),
("a4", 0.500000),
("a5", 0.500000),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_mw_linear1(self):
"""Test the rank function with the SAW, MW, Linear1 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear1", w_method="MW", s_method="SAW")
expected_ranking = [
("a2", 0.677778),
("a1", 0.669167),
("a3", 0.638889),
("a6", 0.625000),
("a5", 0.590278),
("a4", 0.588889),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_sd_linear1(self):
"""Test the rank function with the SAW, SD, Linear1 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear1", w_method="SD", s_method="SAW")
expected_ranking = [
("a2", 0.653952),
("a3", 0.604472),
("a1", 0.601574),
("a6", 0.595749),
("a5", 0.539665),
("a4", 0.530537),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_vic_linear1(self):
"""Test the rank function with the SAW, VIC, Linear1 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear1", w_method="VIC", s_method="SAW")
expected_ranking = [
("a2", 0.650527),
("a1", 0.612074),
("a3", 0.599994),
("a6", 0.594459),
("a5", 0.540496),
("a4", 0.537186),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_vic_ap_linear1(self):
"""Test the rank function with the SAW, VIC.AP, Linear1 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear1", c_method="AbsPearson", w_method="VIC",
s_method="SAW")
expected_ranking = [
("a2", 0.644440),
("a1", 0.623018),
("a3", 0.593228),
("a6", 0.591963),
("a4", 0.543750),
("a5", 0.540097),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_mew_vic_linear1(self):
"""Test the rank function with the MEW, VIC, Linear1 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear1", w_method="VIC", s_method="MEW")
expected_ranking = [
("a6", 0.583347),
("a3", 0.574199),
("a5", 0.480220),
("a2", 0.469420),
("a4", 0.304194),
("a1", 0.192606),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_critic_linear2(self):
"""Test the rank function with the SAW, CRITIC, Linear2 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear2", w_method="CRITIC", s_method="SAW")
expected_ranking = [
("a2", 0.669839),
("a5", 0.647361),
("a3", 0.645343),
("a6", 0.622660),
("a4", 0.587153),
("a1", 0.471261),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_critic_dc_linear2(self):
"""Test the rank function with the SAW, CRITIC.DC, Linear2 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear2", c_method="dCor", w_method="CRITIC",
s_method="SAW")
expected_ranking = [
("a2", 0.677366),
("a5", 0.675493),
("a3", 0.658395),
("a6", 0.652317),
("a4", 0.622630),
("a1", 0.456501),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_topsis_em_linear3(self):
"""Test the rank function with the TOPSIS, EM, Linear3 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear3", w_method="EM", s_method="TOPSIS")
expected_ranking = [
("a6", 0.983188),
("a3", 0.980454),
("a5", 0.968182),
("a2", 0.967595),
("a4", 0.808142),
("a1", 0.033316),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_mtopsis_em_linear3(self):
"""Test the rank function with the mTOPSIS, EM, Linear3 methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Linear3", w_method="EM", s_method="mTOPSIS")
expected_ranking = [
("a6", 0.955577),
("a5", 0.954078),
("a3", 0.938579),
("a2", 0.909531),
("a4", 0.808416),
("a1", 0.096521),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_topsis_w_vector(self):
"""Test the rank function with the TOPSIS, w, Vector methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Vector", w_vector=[0.3, 0.2, 0.4, 0.1],
s_method="TOPSIS")
expected_ranking = [
("a5", 0.868655),
("a6", 0.846338),
("a4", 0.812076),
("a3", 0.789327),
("a2", 0.718801),
("a1", 0.300742),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_mtopsis_w_vector(self):
"""Test the rank function with the mTOPSIS, w, Vector methods."""
x_matrix = [
[0.9, 30.0, 500.0, 4.0],
[0.1, 50.0, 5.0, 6.0],
[0.5, 80.0, 8.0, 6.0],
[0.8, 40.0, 100.0, 4.0],
[0.7, 60.0, 20.0, 5.0],
[0.6, 60.0, 10.0, 5.0],
]
obtained_ranking = mcdm.rank(
x_matrix, is_benefit_x=[True, False, False, True],
n_method="Vector", w_vector=[0.3, 0.2, 0.4, 0.1],
s_method="mTOPSIS")
expected_ranking = [
("a5", 0.836287),
("a6", 0.814430),
("a4", 0.805387),
("a3", 0.745801),
("a2", 0.688769),
("a1", 0.341532),
]
self.assertEqual(len(obtained_ranking), len(expected_ranking))
for i, tmp in enumerate(obtained_ranking):
self.assertEqual(tmp[0], expected_ranking[i][0])
self.assertAlmostEqual(tmp[1], expected_ranking[i][1], places=6)
def test_rank_saw_critic(self):
"""Test the rank function with the SAW and CRITIC methods."""
x_matrix = [
[1.000000, 1.000000, 0.017276],
[0.046296, 0.022222, 1.000000],
[0.259295, 0.106985, 0.783554],
[0.260509, 0.107106, 0.801962],
[0.090419, 0.044763, 0.245226],
[0.563999, 0.239328, 0.288358],
[0.320434, 0.147798, 0.738850],
[0.314969, 0.144773, 0.751384],
[0.714533, 0.364252, 0.092688],
[0.972336, 0.706954, 0.091856],
[0.283518, 0.127236, 0.805858],
[0.296781, 0.132676, 0.797796],
[0.265469, 0.122640, 0.202089],
[0.839930, 0.461981, 0.304980],
[0.282103, 0.126395, 0.808264],
[0.296100, 0.132096, 0.799922],
[0.212761, 0.104337, 0.229227],
[0.798002, 0.429797, 0.335956],
[0.068258, 0.035742, 0.519465],
[0.102412, 0.055489, 0.281905],
[0.155229, 0.085050, 0.163012],
[0.238498, 0.128995, 0.103688],
[0.177178, 0.075565, 0.854643],
[0.257650, 0.112055, 0.811516],
[0.294934, 0.131563, 0.781283],
[0.310552, 0.140593, 0.762520],
[0.368115, 0.159646, 0.449073],
[0.498578, 0.228317, 0.296180],
[0.635688, 0.310778, 0.210340],
[0.759518, 0.402583, 0.149893],
[0.499916, 0.188975, 0.302964],
[0.717516, 0.306092, 0.249340],
[0.790702, 0.359737, 0.221402],
[0.848093, 0.415040, 0.193533],
[0.068414, 0.035866, 0.519542],
[0.102469, 0.055554, 0.282188],
[0.155261, 0.085064, 0.162956],
[0.238748, 0.129114, 0.103684],
]
alt_names = [
"Epidemic",
"Direct",
"CnF.LTS",
"CnF.DestEnc",
"CnF.Enc",
"CnF.PRoPHET",
"CnR.LTS",
"CnR.DestEnc",
"CnR.Enc",
"CnR.PRoPHET",
"DF.LTS",
"DF.DestEnc",
"DF.Enc",
"DF.PRoPHET",
"COORD.LTS",
"COORD.DestEnc",
"COORD.Enc",
"COORD.PRoPHET",
"SnW.L2",
"SnW.L4",
"SnW.L8",
"SnW.L16",
"LSF-SnW.L2",
"LSF-SnW.L4",
"LSF-SnW.L8",
"LSF-SnW.L16",
"SnF.L2",
"SnF.L4",
"SnF.L8",
"SnF.L16",
"SimBetTS.L2",
"SimBetTS.L4",
"SimBetTS.L8",
"SimBetTS.L16",
"EBR.L2",
"EBR.L4",
"EBR.L8",
"EBR.L16",
]
obtained_ranking = mcdm.rank(
x_matrix, | |
(13, 16),
(16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
"GluonNLP is great!!!!!!",
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# \'abc\'"]
gt_str_decode = SUBWORD_TEST_SAMPLES
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SubwordNMTTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
# Case 2, bpe_dropout
# We use str decode here because we may not perfectly recover the original sentence with int decode.
tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
os.remove(model_path)
os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
    """Check HuggingFaceBPETokenizer tokens, offsets and decode against pinned fixtures."""
    with tempfile.TemporaryDirectory() as dir_path:
        # Fetch the pretrained test model + vocab files into a throwaway dir.
        model_path = os.path.join(dir_path, 'test_hf_bpe.model')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, default lowercase=False
        tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
        gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
                         'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
                        ['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
                         '!</w>', '!</w>', '!</w>', '!</w>'],
                        ['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
                         'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
                         'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
                         '.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
                         "'</w>", 'ab', 'c</w>', "'</w>"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
                       (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
                       (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
                       (17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
                       (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
                       (48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
                       (58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
        # gt_int_decode = gt_str_decode for hf
        # hf removed the unk tokens in decode result
        gt_decode = ["Hello , y ' all ! How are you ?",
                     'GluonNLP is great ! ! ! ! ! !',
                     "GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian . . . . . / : ! @ # ' abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=True
        gt_lowercase_decode = ["hello , y ' all ! how are you ?",
                               'gluonnlp is great ! ! ! ! ! !',
                               "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
        tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # NOTE(review): these removes are redundant — TemporaryDirectory cleans up.
        os.remove(model_path)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
    """Check HuggingFaceByteBPETokenizer tokens, offsets and decode against pinned fixtures."""
    with tempfile.TemporaryDirectory() as dir_path:
        # Fetch the pretrained test model + vocab files into a throwaway dir.
        model_path = os.path.join(dir_path, 'hf_bytebpe.model')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, default lowercase=False
        tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
        # Byte-level BPE pieces: 'Ġ' encodes a leading space; multi-byte chars
        # (emoji, fullwidth '!') split into several byte tokens.
        gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
                         'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
                        ['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
                         'ģ', 'ï¼', 'ģ', '!!!'],
                        ['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
                         '-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
                         'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
                         '#', "Ġ'", 'ab', 'c', "'"]]
        # The definition of byte-level token offsets is not entirely clear;
        # these are the values the tokenizer currently produces.
        gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
                       (21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
                       (30, 31), (31, 33), (32, 33), (33, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
                       (17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
                       (16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
                       (30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
                       (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
                       (63, 65), (65, 66), (66, 67)]]
        gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                     'GluonNLP is great!!!!!!',
                     "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=True
        gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
                                   'gluonnlp is great!!!!!!',
                                   "gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:!@# 'abc'"]
        tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # NOTE(review): these removes are redundant — TemporaryDirectory cleans up.
        os.remove(model_path)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
    """Check HuggingFaceWordPieceTokenizer tokens, offsets and decode against pinned fixtures."""
    with tempfile.TemporaryDirectory() as dir_path:
        # WordPiece needs only vocab files (no merges model).
        vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
        download(url=get_repo_url()
                + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
                path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
        download(url=get_repo_url()
                + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
                path=hf_vocab_path)
        # Case 1, lowercase=True
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
        gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
                         "<unk>", "<unk>", "<unk>", "<unk>", "?"],
                        ["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
                         "\uff01", "\uff01", "!", "!", "!"],
                        ["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
                         "##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
                         "xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
                         "@", "#", "'", "abc", "'"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
                       (22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
                       (19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
                       (40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
                       (62, 63), (63, 66), (66, 67)]]
        gt_decode = ["hello, y'all! how are you?",
                     "gluonnlp is great ! ! !!!!",
                     "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian..... / :! @ #'abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=False
        # With a lowercase-only vocab, uppercase words decode as unknowns and vanish.
        gt_lowercase_decode = [", y'all! are you?",
                               "is great ! ! !!!!",
                               "- - - - - -..... / :! @ #'abc '"]
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # NOTE(review): these removes are redundant — TemporaryDirectory cleans up.
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
"""Test for huggingface tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
path=model_path,
sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
path=vocab_path,
sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
'[UNK]', '[UNK]', '!', '!', '!'],
['Gl', '##u', '##on', '##N', '##L', '##P', '-',
'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', | |
water_supply_system_well_name = models.CharField(
max_length=80, blank=True, null=True, verbose_name='Water Supply System Well Name',
db_comment=('The specific name given to a water supply system well. Often, the name reflects which '
'well it is within the system, e.g. Well 1 or South Well'))
static_water_level = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Static Water Level (BTOC)',
db_comment='The level (depth below ground) to which water will naturally rise in a well without '
'pumping, measured in feet.')
well_yield = models.DecimalField(
max_digits=8, decimal_places=3, blank=True, null=True, verbose_name='Estimated Well Yield',
db_comment=('An approximate estimate of the capacity of the well to produce groundwater. Estimated '
'by the well driller during construction by conducting a well yield test. Measured in US '
'Gallons/minute.'))
artesian_flow = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Flow',
db_comment=('Measurement of the artesian well\'s water flow that occurs naturally due to inherent'
' water pressure in the well. Pressure within the aquifer forces the groundwater to rise'
' above the land surface naturally without using a pump. Flowing artesian wells can flow'
' on an intermittent or continuous basis. Measured in US Gallons/minute.'))
artesian_pressure = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure',
db_comment=('Pressure of the water coming out of an artesian well as measured at the time of '
'construction. Measured in PSI (pounds per square inch).'))
artesian_pressure_head = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure head',
db_comment=('Pressure of the water coming out of an artesian well as measured at the time of '
'construction. Measured in ft agl (feet above ground level).'))
artesian_conditions = models.BooleanField(default=False, verbose_name='Artesian Conditions',
db_comment=('Artesian conditions arise when there is a movement of '
'groundwater from a recharge area under a confining '
'formation to a point of discharge at a lower elevation. '
'An example of this is a natural spring, or in the '
'example of the drilling industry, a flowing water well.'))
well_cap_type = models.CharField(
max_length=40, blank=True, null=True, verbose_name='Well Cap')
well_disinfected = models.BooleanField(
default=False, verbose_name='Well Disinfected', choices=((False, 'No'), (True, 'Yes')))
well_disinfected_status = models.ForeignKey(WellDisinfectedCode, db_column='well_disinfected_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Disinfected Code')
comments = models.CharField(
max_length=3000, blank=True, null=True,
db_comment=('Free form text used by the user (driller or staff) to include comments for the well.'
' Investiate how staff/developer comments are put in here from activity submission.'))
internal_comments = models.CharField(
max_length=3000, blank=True, null=True)
alternative_specs_submitted = models.BooleanField(
default=False,
verbose_name='Alternative specs submitted (if required)',
choices=((False, 'No'), (True, 'Yes')),
db_comment=('Indicates if an alternative specification was used for siting of a water supply'
' well, or a permanent dewatering well, or for the method used for decommissioning a'
' well.'))
well_yield_unit = models.ForeignKey(
WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.PROTECT, blank=True, null=True)
# want to be integer in future
diameter = models.CharField(max_length=9, blank=True)
observation_well_number = models.CharField(
max_length=30, blank=True, null=True, verbose_name="Observation Well Number",
db_comment=('A unique number assigned to a well that has been included as part '
'of the Provincial Groundwater Observation Well Network, e.g., 406.'))
observation_well_status = models.ForeignKey(
ObsWellStatusCode, db_column='obs_well_status_code', blank=True, null=True,
verbose_name="Observation Well Status", on_delete=models.PROTECT,
db_comment=('Status of an observation well within the Provincial Groundwater Observation Well '
'Network, i.e. Active (a well that is currently being used to collect '
'groundwater information), Inactive (a well that is no longer being used to '
'collect groundwater information).'))
ems = models.CharField(max_length=10, blank=True, null=True,
verbose_name="Environmental Monitoring System (EMS) ID")
utm_zone_code = models.CharField(
max_length=10, blank=True, null=True, verbose_name="Zone")
utm_northing = models.IntegerField(
blank=True, null=True, verbose_name="UTM Northing")
utm_easting = models.IntegerField(
blank=True, null=True, verbose_name="UTM Easting")
coordinate_acquisition_code = models.ForeignKey(
CoordinateAcquisitionCode, default='H', blank=True, null=True, verbose_name="Location Accuracy Code",
db_column='coordinate_acquisition_code', on_delete=models.PROTECT,
db_comment=('Codes for the accuracy of the coordinate position, which is best estimated based on'
' the information provided by the data submitter and analysis done by staff. E.g. A,'
' B, C.'))
bcgs_id = models.ForeignKey(BCGS_Numbers, db_column='bcgs_id', on_delete=models.PROTECT, blank=True,
null=True, verbose_name="BCGS Mapsheet Number")
decommission_reason = models.CharField(
max_length=250, blank=True, null=True, verbose_name="Reason for Decommission")
decommission_method = models.ForeignKey(
DecommissionMethodCode, db_column='decommission_method_code', blank=True, null="True",
verbose_name="Method of Decommission", on_delete=models.PROTECT,
db_comment='Valid code for the method used to fill the well to close it permanently.')
decommission_sealant_material = models.CharField(
max_length=100, blank=True, null=True, verbose_name="Decommission Sealant Material")
decommission_backfill_material = models.CharField(
max_length=100, blank=True, null=True, verbose_name="Decommission Backfill Material")
decommission_details = models.CharField(
max_length=250, blank=True, null=True, verbose_name="Decommission Details")
aquifer = models.ForeignKey(
'aquifers.Aquifer', db_column='aquifer_id', on_delete=models.PROTECT, blank=True,
null=True, verbose_name='Aquifer ID Number',
db_comment=('System generated unique sequential number assigned to each mapped aquifer. The'
' aquifer_id identifies which aquifer a well is in. An aquifer can have multiple'
' wells, while a single well can only be in one aquifer.'))
person_responsible = models.ForeignKey('registries.Person', db_column='person_responsible_guid',
on_delete=models.PROTECT,
verbose_name='Person Responsible for Drilling',
null=True, blank=True)
company_of_person_responsible = models.ForeignKey(
'registries.Organization', db_column='org_of_person_responsible_guid', on_delete=models.PROTECT,
verbose_name='Company of person responsible for drilling', null=True, blank=True)
driller_name = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Name of Person Who Did the Work')
consultant_name = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Consultant Name',
db_comment=('Name of consultant (person) that was involved in the construction, alteration, or'
' decommision of a well.'))
consultant_company = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Consultant Company',
db_comment=('Name of consultant company that was involved in the construction, alteration, or'
' decommision of a well.'))
# Aquifer related data
aquifer_vulnerability_index = models.DecimalField(
max_digits=10, decimal_places=0, blank=True, null=True, verbose_name='AVI',
db_comment=('Valid codes that Indicate the aquifer’s relative intrinsic vulnerability to impacts'
' from human activities at the land surface. Vulnerability is based on: the type,'
' thickness, and extent of geologic materials above the aquifer, depth to water'
' table (or to top of confined aquifer), and type of aquifer materials. E.g. H, L, M'))
storativity = models.DecimalField(
max_digits=8, decimal_places=7, blank=True, null=True, verbose_name='Storativity')
transmissivity = models.DecimalField(
max_digits=30, decimal_places=10, blank=True, null=True, verbose_name='Transmissivity')
hydraulic_conductivity = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Hydraulic Conductivity')
specific_storage = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Specific Storage')
specific_yield = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Specific Yield')
testing_method = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Testing Method')
testing_duration = models.PositiveIntegerField(blank=True, null=True)
analytic_solution_type = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Analytic Solution Type',
db_comment='Mathematical formulation used to estimate hydraulic parameters.')
boundary_effect = models.ForeignKey(BoundaryEffectCode, db_column='boundary_effect_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Boundary Effect',
db_comment='Valid codes for the boundaries observed in '
'pumping test analysis. i.e. CH, NF.')
aquifer_lithology = models.ForeignKey(
AquiferLithologyCode, db_column='aquifer_lithology_code', blank=True, null=True,
on_delete=models.PROTECT,
verbose_name='Aquifer Lithology',
db_comment=('Valid codes for the type of material an aquifer consists of. i.e., Unconsolidated, '
'Bedrock, Unknown.'))
# Production data related data
yield_estimation_method = models.ForeignKey(
YieldEstimationMethodCode, db_column='yield_estimation_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Estimation Method')
yield_estimation_rate = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='Estimation Rate',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
yield_estimation_duration = models.DecimalField(
max_digits=9, decimal_places=2, verbose_name='Estimation Duration',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))])
static_level_before_test = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='SWL Before Test',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.0'))])
drawdown = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True,
validators=[MinValueValidator(Decimal('0.00'))])
hydro_fracturing_performed = models.BooleanField(
default=False, verbose_name='Hydro-fracturing Performed?',
choices=((False, 'No'), (True, 'Yes')))
hydro_fracturing_yield_increase = models.DecimalField(
max_digits=7, decimal_places=2,
verbose_name='Well Yield Increase Due to Hydro-fracturing',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.00'))])
recommended_pump_depth = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Recommended pump depth',
validators=[MinValueValidator(Decimal('0.00'))])
recommended_pump_rate = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Recommended pump rate',
validators=[MinValueValidator(Decimal('0.00'))])
    class Meta:
        # Explicit table name used in the database schema.
        db_table = 'well'
        verbose_name = 'A well record'
def __str__(self):
if self.well_tag_number:
return '%d %s' % (self.well_tag_number, self.street_address)
else:
return 'No well tag number %s' % (self.street_address)
# Custom JSON serialisation for Wells. Expand as needed.
    def as_dict(self):
        """Summarise key identity/location fields as a plain dict (for JSON)."""
        return {
            "latitude": self.latitude,
            "longitude": self.longitude,
            "guid": self.well_guid,
            "identification_plate_number": self.identification_plate_number,
            "street_address": self.street_address,
            "well_tag_number": self.well_tag_number
        }
@property
def licenced_status(self):
return LicencedStatusCode.objects.get(licenced_status_code='LICENSED') if self.licences.all().exists() \
else LicencedStatusCode.objects.get(licenced_status_code='UNLICENSED')
@property
def latitude(self):
if self.geom:
return self.geom.y
else:
return None
@property
def longitude(self):
if self.geom:
return self.geom.x
else:
return None
db_table_comment = ('Describes how a well was constructed, altered, decomissioned over time. Includes '
'information related to who owns the well, location of well, the lithologic '
'description as well as other information related to the construction of the well.')
db_column_supplemental_comments = {
"alternative_specs_submitted":"Indicates if an alternative specification was used for siting of a water supply well or a permanent dewatering well, or if an alternative specification was used for decommissioning a well.",
"aquifer_id":"System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.",
"artesian_flow":"Measurement of the artesian well's water flow | |
"""WRITEME"""
import sys
if sys.version_info[:2] >= (2,5):
from collections import defaultdict
# otherwise it's implemented in python25.py
import theano
import toolbox
import graph
from theano.gof.python25 import deque
from env import InconsistencyError
class ProtocolError(Exception):
    """Raised on violations of a feature protocol.

    NOTE(review): the original docstring was "WRITEME"; the precise
    contract is not visible in this file chunk — confirm at call sites.
    """
    pass
class DestroyHandler(object):
    """Env feature that forwards every callback to a per-env helper.

    One ``DestroyHandlerHelper2`` is created per env on attach (stored in
    ``self.map``); all subsequent callbacks are delegated to it.
    """

    def __init__(self, do_imports_on_attach=True):
        # env -> DestroyHandlerHelper2 for that env
        self.map = {}
        self.do_imports_on_attach = do_imports_on_attach

    def _helper_for(self, env):
        """Return the helper previously registered for ``env``."""
        return self.map[env]

    def on_attach(self, env):
        helper = self.map.setdefault(
            env, DestroyHandlerHelper2(do_imports_on_attach=self.do_imports_on_attach))
        helper.on_attach(env)

    def on_detach(self, env):
        self._helper_for(env).on_detach(env)

    def on_import(self, env, op):
        self._helper_for(env).on_import(env, op)

    def on_prune(self, env, op):
        self._helper_for(env).on_prune(env, op)

    def on_change_input(self, env, node, i, r, new_r):
        self._helper_for(env).on_change_input(env, node, i, r, new_r)

    def validate(self, env):
        self._helper_for(env).validate(env)

    def orderings(self, env):
        return self._helper_for(env).orderings(env)
def _dfs_toposort(i, r_out, orderings):
"""
i - list of inputs
o - list of outputs
orderings - dict of additions to the normal inputs and outputs
Returns nothing. Raises exception for graph with cycles
"""
#this is hard-coded reimplementation of functions from graph.py
# reason: go faster, prepare for port to C.
assert isinstance(r_out, (tuple, list, deque))
# TODO: For more speed - use a defaultdict for the orderings
iset = set(i)
if 0:
def expand(obj):
rval = []
if obj not in iset:
if isinstance(obj, graph.Variable):
if obj.owner:
rval = [obj.owner]
if isinstance(obj, graph.Apply):
rval = list(obj.inputs)
rval.extend(orderings.get(obj, []))
else:
assert not orderings.get(obj, [])
return rval
expand_cache = {}
# reachable, clients = stack_search( deque(r_out), deps, 'dfs', True)
start=deque(r_out)
rval_set = set()
rval_set.add(id(None))
rval_list = list()
expand_inv = {}
sources = deque()
while start:
l = start.pop()# this makes the search dfs
if id(l) not in rval_set:
rval_list.append(l)
rval_set.add(id(l))
if l in iset:
assert not orderings.get(l, [])
expand_l = []
else:
try:
if l.owner:
expand_l = [l.owner]
else:
expand_l = []
except AttributeError:
expand_l = list(l.inputs)
expand_l.extend(orderings.get(l, []))
if expand_l:
for r in expand_l:
expand_inv.setdefault(r, []).append(l)
start.extend(expand_l)
else:
sources.append(l)
expand_cache[l] = expand_l
assert len(rval_list) == len(rval_set)-1
rset = set()
rlist = []
while sources:
node = sources.popleft()
if node not in rset:
rlist.append(node)
rset.add(node)
for client in expand_inv.get(node, []):
expand_cache[client] = [a for a in expand_cache[client] if a is not node]
if not expand_cache[client]:
sources.append(client)
if len(rlist) != len(rval_list):
raise ValueError('graph contains cycles')
#return [o for o in rlist if isinstance(o, graph.Apply)]
def getroot(r, view_i):
    """Follow the view chain in ``view_i`` down to its root.

    For views: return the non-view variable which is ultimately viewed by r.
    For non-views: return r itself.
    """
    while r in view_i:
        r = view_i[r]
    return r
def add_impact(r, view_o, impact):
    """Collect into ``impact`` every variable that is transitively a view of ``r``.

    In opposition to getroot, which finds the variable that is viewed *by* r,
    this walks the other direction: all variables that are views of r.

    :param view_o: dict mapping a variable to the list of its direct views
    :param impact: set the views are added to (``r`` itself is not added)
    """
    pending = list(view_o.get(r, []))
    while pending:
        view_var = pending.pop()
        impact.add(view_var)
        pending.extend(view_o.get(view_var, []))
def get_impact(root, view_o):
    """Return the set of all variables that are transitively views of ``root``."""
    impact = set()
    frontier = list(view_o.get(root, []))
    while frontier:
        view_var = frontier.pop()
        impact.add(view_var)
        frontier.extend(view_o.get(view_var, []))
    return impact
def fast_inplace_check(inputs):
    """
    Return the Variables in `inputs` that are possible candidates for use as
    the destination of an inplace operation.

    :type inputs: list
    :param inputs: input Variables that you want to use as inplace
        destinations
    :returns: the subset of `inputs` that are not constants, are not already
        destroyed by some Op, and are neither protected by a Supervisor
        feature nor used as graph outputs
    """
    env = inputs[0].env
    # collect every variable the Supervisor features mark as protected,
    # plus the graph outputs, which must never be destroyed
    protected = []
    for feature in env._features:
        if isinstance(feature, theano.compile.function_module.Supervisor):
            protected.extend(feature.protected)
    protected.extend(env.outputs)
    return [candidate for candidate in inputs
            if not isinstance(candidate, graph.Constant)
            and not env.destroyers(candidate)
            and candidate not in protected]
class DestroyHandlerHelper2(toolbox.Bookkeeper):
"""
The DestroyHandlerHelper2 class detects when a graph is impossible to evaluate because of
aliasing and destructive operations.
Several data structures are used to do this.
When an Op uses its view_map property to declare that an output may be aliased
    to an input, then if that output is destroyed, the input is also considered to be
destroyed. The view_maps of several Ops can feed into one another and form a directed graph.
The consequence of destroying any variable in such a graph is that all variables in the graph
    must be considered to be destroyed, because they could all be referring to the same
underlying storage. In the current implementation, that graph is a tree, and the root of
that tree is called the foundation. The `droot` property of this class maps from every
graph variable to its foundation. The `impact` property maps backward from the foundation
to all of the variables that depend on it. When any variable is destroyed, this class marks
the foundation of that variable as being destroyed, with the `root_destroyer` property.
"""
droot = {}
"""
destroyed view + nonview variables -> foundation
"""
impact = {}
"""
destroyed nonview variable -> it + all views of it
"""
root_destroyer = {}
"""
root -> destroyer apply
"""
def __init__(self, do_imports_on_attach=True):
self.env = None
self.do_imports_on_attach = do_imports_on_attach
def on_attach(self, env):
#boilerplate from old implementation
if self.env is not None:
raise Exception("A DestroyHandler instance can only serve one Env.")
for attr in ('destroyers', 'destroy_handler'):
if hasattr(env, attr):
raise toolbox.AlreadyThere("DestroyHandler feature is already present or in conflict with another plugin.")
def get_destroyers_of(r):
droot, impact, root_destroyer = self.refresh_droot_impact()
try:
return [root_destroyer[droot[r]]]
except Exception:
return []
env.destroyers = get_destroyers_of
env.destroy_handler = self
self.env = env
self.destroyers = set() #set of Apply instances with non-null destroy_map
self.view_i = {} # variable -> variable used in calculation
self.view_o = {} # variable -> set of variables that use this one as a direct input
#clients: how many times does an apply use a given variable
self.clients = {} # variable -> apply -> ninputs
self.stale_droot = True
self.debug_all_apps = set()
if self.do_imports_on_attach:
toolbox.Bookkeeper.on_attach(self, env)
def refresh_droot_impact(self):
if self.stale_droot:
self.droot, self.impact, self.root_destroyer = self._build_droot_impact()
self.stale_droot = False
return self.droot, self.impact, self.root_destroyer
def _build_droot_impact(self):
droot = {} # destroyed view + nonview variables -> foundation
impact = {} # destroyed nonview variable -> it + all views of it
root_destroyer = {} # root -> destroyer apply
for app in self.destroyers:
for output_idx, input_idx_list in app.op.destroy_map.items():
if len(input_idx_list) != 1:
raise NotImplementedError()
input_idx = input_idx_list[0]
input = app.inputs[input_idx]
input_root = getroot(input, self.view_i)
if input_root in droot:
raise InconsistencyError("Multiple destroyers of %s" % input_root)
droot[input_root] = input_root
root_destroyer[input_root] = app
#input_impact = set([input_root])
#add_impact(input_root, self.view_o, input_impact)
input_impact = get_impact(input_root, self.view_o)
for v in input_impact:
assert v not in droot
droot[v] = input_root
impact[input_root] = input_impact
impact[input_root].add(input_root)
return droot, impact, root_destroyer
def on_detach(self, env):
if env is not self.env:
raise Exception("detaching wrong env", env)
del self.destroyers
del self.view_i
del self.view_o
del self.clients
del self.stale_droot
assert self.env.destroyer_handler is self
delattr(self.env, 'destroyers')
delattr(self.env, 'destroy_handler')
self.env = None
def on_import(self, env, app):
"""Add Apply instance to set which must be computed"""
if app in self.debug_all_apps: raise ProtocolError("double import")
self.debug_all_apps.add(app)
#print 'DH IMPORT', app, id(app), id(self), len(self.debug_all_apps)
# If it's a destructive op, add it to our watch list
if getattr(app.op, 'destroy_map', {}):
self.destroyers.add(app)
# add this symbol to the forward and backward maps
for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():
if len(i_idx_list) > 1:
raise NotImplementedError('destroying this output invalidates multiple inputs', (app.op))
o = app.outputs[o_idx]
i = app.inputs[i_idx_list[0]]
self.view_i[o] = i
self.view_o.setdefault(i,set()).add(o)
# update self.clients
for i, input in enumerate(app.inputs):
self.clients.setdefault(input, {}).setdefault(app,0)
self.clients[input][app] += 1
for i, output in enumerate(app.outputs):
self.clients.setdefault(output, {})
self.stale_droot = True
def on_prune(self, env, app):
"""Remove Apply instance from set which must be computed"""
if app not in self.debug_all_apps: raise ProtocolError("prune without import")
self.debug_all_apps.remove(app)
#UPDATE self.clients
for i, input in enumerate(set(app.inputs)):
del self.clients[input][app]
if getattr(app.op, 'destroy_map', {}):
self.destroyers.remove(app)
# Note: leaving empty client dictionaries in the struct.
# Why? It's a pain to remove them. I think they aren't doing any harm, they will be
# deleted on_detach().
#UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():
if len(i_idx_list) > 1:
#destroying this output invalidates multiple inputs
raise NotImplementedError()
o = app.outputs[o_idx]
i = app.inputs[i_idx_list[0]]
del self.view_i[o]
self.view_o[i].remove(o)
if not self.view_o[i]:
del self.view_o[i]
self.stale_droot = True
def on_change_input(self, env, app, i, old_r, new_r):
"""app.inputs[i] changed from old_r to new_r """
if app == 'output':
# app == 'output' is special key that means Env is redefining which nodes are being
# considered 'outputs' of the graph.
pass
else:
if app not in self.debug_all_apps: raise ProtocolError("change without import")
#UPDATE self.clients
self.clients[old_r][app] -= 1
if self.clients[old_r][app] | |
await self.scout_points()
for waypoint in waypoints:
self.do(self.puuhapete.move(waypoint, queue=True))
# if self.chat:
# await self._client.chat_send("Scout sent.", team_only=False)
self.scout_sent = True
self.send_scout = False
if self.techlabs_and_reactors.amount > 2:
self.do(self.puuhapete.attack(self.techlabs_and_reactors.closest_to(self.puuhapete)))
# mine mineral wall in golden wall
if self.mine_mineral_wall and self.ccANDoc.ready.amount == 2:
rich_mineralfield = self.mineral_field.of_type(
[PURIFIERRICHMINERALFIELD, PURIFIERRICHMINERALFIELD750])
mfss = self.mineral_field.of_type([MINERALFIELD450])
if not rich_mineralfield:
self.mine_mineral_wall = False
elif rich_mineralfield and mfss:
self.mine_mineral_wall = False
print("Clearing path to underworld.")
mf = min(mfss, key=lambda x: x.distance_to(self.start_location))
task_force = self.scvs.take(6)
# self.do(self.puuhapete.gather(mf))
for worker in task_force:
if worker.is_carrying_minerals:
self.do(worker(AbilityId.HARVEST_RETURN))
self.do(worker.gather(mf, queue=True))
else:
self.do(worker.gather(mf, queue=True))
## fix broken things
units_in_repair_group = 0
new_fixer = None
for fixer in self.scvs:
if fixer.is_in_repair_group:
units_in_repair_group += 1
if (units_in_repair_group < self.minimum_repairgroup
or ((self.build_cc_home or self.ccANDoc.amount >= 4) and units_in_repair_group < 4)):
possible_fixers = self.scvs.filter(
lambda x: x.is_carrying_minerals and not x.is_puuhapete and not x.is_in_repair_group)
if possible_fixers:
new_fixer = random.choice(possible_fixers)
if new_fixer:
self.add_unit_to_repair_group(new_fixer)
print("New fixer assigned")
repailable_units = (self.vikingassault |
self.hellions |
self.cyclones |
self.siegetanks |
self.mines_burrowed |
self.siegetanks_sieged |
self.medivacs |
self.battlecruisers |
self.vikingassault |
self.thors)
if self.repair_group and self.iteraatio % 16 == 0 and not self.home_in_danger:
ajoneuvot = repailable_units.ready.closer_than(15, self.homeBase)
rakennukset = self.structures.ready.exclude_type([TECHLAB, REACTOR])
potilaat = (ajoneuvot | rakennukset).filter(lambda x: x.health_percentage < 1)
if self.marines.amount == 1 and self.enemy_units.of_type(UnitTypeId.ZEALOT) and self.time < 180:
target = self.enemy_units.of_type(UnitTypeId.ZEALOT).closest_to(self.marines.first)
for fixer in self.repair_group:
self.do(fixer.attack(target))
elif potilaat:
# print(potilaat)
for fixer in self.repair_group:
potilas = potilaat.closest_to(fixer)
wall = self.structures(UnitTypeId.SUPPLYDEPOT).filter(lambda x: x.health_percentage < 1)
if wall:
potilas = wall.sorted(lambda x: x.health_percentage, reverse=False)[0]
self.do(fixer(EFFECT_REPAIR_SCV, potilas))
self.do(fixer(EFFECT_REPAIR_SCV, potilaat.random, queue=True))
# print(fixer.orders)
# print(fixer.orders[0])
# self.do(fixer.move(self.start_location.random_on_distance(6), queue = True))
## ghost reporting
max_energy = 0
nuke_ordered = False
sniped_targets = []
can_snipe = True
self.emp_timer += 1
if not self.nuke_target:
self.nuke_spotter_tag = None
if self.emp_timer > 2:
can_emp = True
else:
can_emp = False
spotter_still_alive = False
for ghost in self.ghosts:
if ghost.tag == self.nuke_spotter_tag:
spotter_still_alive = True
self.nuke_spotter_last_alive_spot = ghost.position
if len(ghost.orders) > 0:
if ghost.orders[0].ability.id in [AbilityId.EFFECT_GHOSTSNIPE]:
sniped_targets.append(ghost.orders[0].target)
ghost.can_nuke = False
ghost.next_in_line = False
if len(ghost.orders) >= 1:
if ghost.orders[0].ability.id in [AbilityId.TACNUKESTRIKE_NUKECALLDOWN]:
nuke_ordered = True
if ghost.energy > max_energy and ghost.health_percentage >= 1:
max_energy = ghost.energy
if not spotter_still_alive and self.nuke_spotter_tag:
self.nuke_spotter_last_died_spot = self.nuke_spotter_last_alive_spot
self.nuke_spotter_tag = None
print("spotter died")
# if sniped_targets:
# print("sniped targets", sniped_targets)
for ghost in self.ghosts:
if ghost.energy == max_energy and await self.has_ability(TACNUKESTRIKE_NUKECALLDOWN, ghost):
ghost.next_in_line = True
if not nuke_ordered:
ghost.can_nuke = True
break
targets_for_snipe = self.enemy_units_and_structures.not_structure.exclude_type(
units_to_ignore_ghost).filter(
lambda x: x.is_biological and x.tag not in sniped_targets)
for ghost in self.ghosts:
if len(ghost.orders) > 0 and ghost.orders[0].ability.id in [AbilityId.TACNUKESTRIKE_NUKECALLDOWN]:
self.nuke_spotter_tag = None
self.nuke_target = None
continue
if ghost.tag == self.nuke_spotter_tag:
await self.ghost_nuke_spotter_micro(ghost)
continue
if len(ghost.orders) > 0 and ghost.orders[0].ability.id in [AbilityId.EFFECT_GHOSTSNIPE]:
continue
potential_targets_EMP = (
self.enemy_units_and_structures.not_structure.exclude_type(units_to_ignore_ghost)
.filter(lambda x: x.shield > 40).closer_than(11 + ghost.radius, ghost))
potential_targets = targets_for_snipe.closer_than(11 + ghost.radius, ghost)
known_enemies = self.enemy_units_and_structures.not_structure
if (self.NukesLeft <= 0 and ghost.health_percentage < 0.9 and await self.can_cast(ghost,
AbilityId.BEHAVIOR_CLOAKON_GHOST)
and self.enemy_units_and_structures.filter(lambda x: x.can_attack_ground).closer_than(20,
ghost)
and self.medivacs):
self.do(ghost(AbilityId.BEHAVIOR_CLOAKON_GHOST))
continue
if await self.can_cast(ghost, AbilityId.BEHAVIOR_CLOAKOFF_GHOST):
if self.NukesLeft <= 0 and ghost.health_percentage >= 1:
self.do(ghost(AbilityId.BEHAVIOR_CLOAKOFF_GHOST))
continue
detectors = (self.enemy_units | self.enemy_structures).filter(lambda x: x.is_detector)
if detectors.closer_than(12, ghost) and ghost.distance_to(self.homeBase) > 20:
self.do(ghost.move(self.homeBase.position))
continue
if await self.avoid_own_nuke(ghost):
continue
if await self.avoid_enemy_siegetanks(ghost):
continue
if ghost.can_nuke and await self.has_ability(TACNUKESTRIKE_NUKECALLDOWN, ghost):
if self.nuke_enemy_home:
if ghost.energy > 70 and self.already_pending(UpgradeId.PERSONALCLOAKING) > 0.75:
if self.enemy_structures.closer_than(3, self.enemy_natural):
target = self.enemy_natural
else:
target = self.enemy_start_location
self.nuke_spotter_tag = ghost.tag
self.nuke_target = target
self.nuke_enemy_home = False
if self.chat:
await self._client.chat_send("Nuke enemy base.", team_only=False)
return
elif (self.enemy_structures.exists
and ghost.energy > 75):
if not self.nuke_target:
expansions_sorted = sorted(self.expansion_locations_list,
key=lambda p: p.distance_to(self.enemy_start_location),
reverse=True)
for base in expansions_sorted:
if self.enemy_structures.ready.closer_than(3, base):
if base.position == self.enemy_natural:
continue
if base.position == self.enemy_start_location:
continue
else:
self.nuke_target = base.position
break
if not self.nuke_target:
self.nuke_target = random.choice(self.enemy_structures).position
if not self.nuke_spotter_tag:
self.nuke_spotter_tag = ghost.tag
if ghost.weapon_cooldown != 0:
threaths = self.enemy_units_and_structures.filter(
lambda x: x.can_attack_ground and x.distance_to(
ghost) < x.radius + x.ground_range + ghost.radius + 2)
if threaths:
threath = threaths.closest_to(ghost.position)
self.do(ghost.move(ghost.position.towards(threath, -6)))
continue
if (ghost.health_percentage < 0.5 and await self.can_cast(ghost, AbilityId.BEHAVIOR_CLOAKON_GHOST)
and self.enemy_units_and_structures.filter(lambda x: x.can_attack_ground).closer_than(20,
ghost)
and self.medivacs):
self.do(ghost(AbilityId.BEHAVIOR_CLOAKON_GHOST))
continue
elif not ghost.next_in_line:
if known_enemies.closer_than(11 + ghost.radius, ghost):
if potential_targets_EMP and ghost.energy >= 75 and can_emp:
potential_targets_EMP = potential_targets_EMP.sorted(lambda x: (x.shield), reverse=True)
target = potential_targets_EMP[0]
can_emp = False
self.emp_timer = 0
self.do(ghost(AbilityId.EMP_EMP, target.position))
print("Ghost: EMP", target.name, target.shield)
continue
if potential_targets and ghost.energy >= 70 and can_snipe:
potential_targets = potential_targets.sorted(lambda x: (x.health + x.shield),
reverse=True)
target = potential_targets[0]
can_snipe = False
self.do(ghost(AbilityId.EFFECT_GHOSTSNIPE, target))
print("Ghost: SNIPE", target.name)
continue
if targets_for_snipe and ghost.energy >= 70:
target = targets_for_snipe.closest_to(ghost)
self.do(ghost.attack(target.position))
continue
# light_enemies_in_range = known_enemies.closer_than(ghost_range + ghost.radius, ghost).filter(lambda x: x.is_light)
# if light_enemies_in_range:
# enemies_in_range_sorted = light_enemies_in_range.sorted(lambda x: (x.health + x.shield), reverse=True)
# target = enemies_in_range_sorted[0]
# self.do(ghost.attack(target))
# continue
if self.NukesLeft and await self.can_cast(ghost, AbilityId.BEHAVIOR_CLOAKOFF_GHOST):
if not self.enemy_units.closer_than(20, ghost):
self.do(ghost(AbilityId.BEHAVIOR_CLOAKOFF_GHOST))
continue
if self.NukesLeft <= 0 and self.enemy_units_and_structures and (
ghost.health_percentage >= 1 or not self.medivacs):
self.do(ghost.attack(self.enemy_units_and_structures.closest_to(ghost).position))
continue
if self.general and self.thors and ghost.energy > 50:
if ghost.distance_to(self.general.position) > 10:
self.do(ghost.move(self.general.position))
continue
if ghost.position.to2.distance_to(self.start_location) > 10:
self.do(ghost.move(self.start_location))
continue
## bunkers
if self.bunkers.ready and self.marines:
if self.kamikaze_target or self.ccANDoc.amount >= 4:
for bunker in self.bunkers.ready:
# abilities = await self.get_available_abilities(bunker)
if await self.has_ability(UNLOADALL_BUNKER, bunker):
self.do(bunker(AbilityId.UNLOADALL_BUNKER))
continue # continue for loop, dont execute any of the following
elif await self.has_ability(EFFECT_SALVAGE, bunker):
self.do(bunker(AbilityId.EFFECT_SALVAGE))
continue # continue for loop, dont execute any of the following
else:
for bunker in self.bunkers.ready:
if await self.has_ability(LOAD_BUNKER, bunker):
marines = self.marines.closer_than(5, bunker)
if marines:
self.do(bunker(AbilityId.LOAD_BUNKER, marines.closest_to(bunker)))
continue # continue for loop, dont execute any of the following
if self.iteraatio % 3 == 0:
if self.limit_vespene > 0 and (self.minerals > 4000 or self.ccANDoc.amount >= 6):
self.limit_vespene = 0
await self.build_refinery()
if self.ccANDoc.ready.amount == 1 and not self.delay_first_expansion:
if self.enemy_units_and_structures.closer_than(30,
self.homeBase).amount > 2 and not self.delay_expansion:
self.delay_first_expansion = True
self.first_base_saturation += 3
await self.build_workers(self.scv_limit)
if not await self.we_should_expand():
await self.safkaa()
elif self.build_cc_home and self.supplydepots.amount < 3 and self.barracks:
await self.safkaa()
if self.enemy_structures.of_type(UnitTypeId.PLANETARYFORTRESS):
self.kamikaze_target = None
self.clear_units_in_kamikaze_troops()
can_build = True
# You have to import "from sc2.units import Units" to make this code work
# raxes_with_reactors = sc2.units.Units([], self) # creates empty Units objects that is populated later
# raxes_with_techlabs = sc2.units.Units([], self)
# raxes_without_addon = sc2.units.Units([], self)
#
# for rax in self.structures.of_type(UnitTypeId.BARRACKS).ready: # cycles through every rax that is ready
# if rax.add_on_tag == 0 and rax.is_idle: # if no add_on attached to rax then .add_on_tag == 0
# raxes_without_addon.append(rax) # appends rax Unit object to raxes_without_addon Units object list
# continue
# for add_on in self.structures(UnitTypeId.BARRACKSREACTOR).ready: # cycles through every reactor
# if rax.add_on_tag == add_on.tag and len(rax.orders) < 2: # compares tags between rax and reactor tags
# raxes_with_reactors.append(rax) # appends rax Unit object to raxes_with_reactors Units object list
# continue
# for add_on in self.structures(UnitTypeId.BARRACKSTECHLAB).ready: # cycles through every techlab
# if rax.add_on_tag == add_on.tag and rax.is_idle: # compares tags between rax and reactor tags
# raxes_with_techlabs.append(rax) # appends rax Unit object to raxes_with_reactors Units object list
# continue
#
# # now you have all raxes divided in three new Units objects.
# # Note that these objects contain only raxes that have free production slots available.
# # now yo can use thase Units objects in your own code.
# # In example below it train marines only in raxes with reactors
#
# br = None
# if raxes_with_reactors:
# br = raxes_with_reactors.first
# if br:
# self.do(br.train(UnitTypeId.MARINE)) # I don't know how this line is supposed to be in current Burnysc2
# TODO make new method for this
if self.take_third_first:
if self.ccANDoc.closer_than(3, self.take_third_first):
self.take_third_first = False
if self.enemy_structures.of_type(UnitTypeId.DARKSHRINE) and not self.scan_cloaked_enemies:
self.build_missile_turrets = True
self.fast_engineeringbay = True
self.scan_cloaked_enemies = True
self.raven_left = 100
print("DARKSHRINE detected!")
if self.chat:
await self._client.chat_send("Dark templars? I should prepare for that.", team_only=False)
if self.ccANDoc.ready.amount + self.townhalls_flying.amount >= 4:
self.wait_until_4_orbital_ready = False
if self.marauder_push_limit != 0 and self.enemy_units.of_type(UnitTypeId.STALKER).amount >= 8:
self.marauder_push_limit = 0
if self.marauders.amount >= self.marauder_push_limit > 0:
self.marauder_push_limit = 0
for unit | |
0), JzKet(S(1)/2, S(-1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2),
JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(
S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2),
JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(
S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2),
JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(
S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(
S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(
S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) )))
# Couple j1+j3=j13, j2+j4=j24, j13+j24=j
# j1=1/2, j2=1/2, j3=1/2, j4=1/2
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
# j1=1/2, j2=1/2, j3=1, j4=1/2
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(1)/2)), ((1, 3), (2, 4), (1, 2)) )))
assert TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)) == \
expand(uncouple(couple( TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(S(1)/2, S(-1)/2)), ((1, 3), (2, 4), (1, 2)) )))
def test_uncouple_2_coupled_states_numerical():
# j1=1/2, j2=1/2
assert uncouple(JzKetCoupled(0, 0, (S(1)/2, | |
test_seqs if s.metadata["id"] in test_ids)
test_seqs = DNAIterator(test_seqs)
test_seqs = Artifact.import_data("FeatureData[Sequence]", test_seqs)
logging.info(
"Commencing classification of "
+ str(len(list(test_seqs.view(DNAIterator))))
+ " sequences"
)
classification = feature_classifier.methods.classify_sklearn(
test_seqs, classifier, confidence=confidence, n_jobs=n_jobs
)
classification = classification.classification
logging.info("Got some classifications")
return classification
def classify_samples_keras(
    test_samples,
    train_taxa,
    ref_seqs,
    classifier_spec,
    confidence,
    n_jobs,
    weights=None,
    sequence_encoder="Seq2VecEncoder",
    read_length=150,
    vec_length=300,
    batch_size=2048,
    epochs=5,
):
    """Fit a keras feature classifier and classify the held-out sequences.

    Trains via ``feature_classifier.methods.fit_classifier_keras`` on the
    full reference set, then classifies only the reference sequences whose
    ids appear in ``test_samples`` (axis="observation").  ``weights``
    (optional) supplies class weights for training; the encoder/batch
    parameters are passed through to the fit action unchanged.

    Returns the classify_keras result's FeatureData[Taxonomy] artifact.
    """
    classifier = feature_classifier.methods.fit_classifier_keras(
        ref_seqs,
        train_taxa,
        class_weight=weights,
        classifier_specification=classifier_spec,
        sequence_encoder=sequence_encoder,
        read_length=read_length,
        vec_length=vec_length,
        batch_size=batch_size,
        epochs=epochs,
        n_jobs=n_jobs
    )
    classifier = classifier.classifier
    # Restrict classification to the sequences observed in the test samples.
    test_ids = set(test_samples.ids(axis="observation"))
    test_seqs = ref_seqs.view(DNAIterator)
    test_seqs = (s for s in test_seqs if s.metadata["id"] in test_ids)
    test_seqs = DNAIterator(test_seqs)
    test_seqs = Artifact.import_data("FeatureData[Sequence]", test_seqs)
    logging.info(
        "Commencing classification of "
        + str(len(list(test_seqs.view(DNAIterator))))
        + " sequences"
    )
    classification = feature_classifier.methods.classify_keras(
        test_seqs, classifier, confidence=confidence, batch_size=batch_size
    )
    classification = classification.classification
    logging.info("Got some classifications")
    return classification
def create_stochastic_classifier(
    ref_taxa,
    ref_seqs,
    confidence,
    weights=None,
):
    """Build a closure that classifies a sequence by sampling a taxon.

    For each distinct reference sequence string, the taxa of all records
    sharing that sequence are weighted (uniformly, or by the ``weights``
    table when given) and collapsed one taxonomic rank at a time until at
    least one candidate's accumulated weight reaches ``confidence``; the
    surviving candidates form a categorical distribution that is sampled
    at classification time.

    Returns a function mapping a sequence string to a
    ``(confidence, taxon)`` pair.
    """
    by_seq = defaultdict(dict)
    for seq in ref_seqs.view(DNAIterator):
        taxon = ref_taxa[seq.metadata['id']]
        if not weights:
            weight = 1
        elif weights.exists(taxon, 'observation'):
            weight = float(weights.get_value_by_ids(taxon, 'Weight'))
        else:
            # Taxon absent from the weights table: fall back to the
            # smallest observed weight.
            weight = float(weights.min())
        by_seq[str(seq)][taxon] = weight
    classifier = {}
    # NOTE: the loop variable below shadows the ``weights`` parameter,
    # which is no longer needed at this point.
    for seq, weights in by_seq.items():
        total = sum(weights.values())
        weighted = []
        for taxon, weight in weights.items():
            # '; ' sentinel so the first rsplit below strips nothing but
            # the sentinel, keeping the full taxonomy on round one.
            weighted.append((weight/total, taxon + '; '))
        while True:
            new_weighted = Counter()
            for weight, taxon in weighted:
                new_weighted[taxon.rsplit('; ', 1)[0]] += weight
            taxa, probs = map(numpy.array, zip(*new_weighted.items()))
            taxa = taxa[probs >= confidence]
            probs = probs[probs >= confidence]
            # Record the unnormalized weights as reported confidences,
            # then renormalize for sampling.
            taxa_with_confidence = list(zip(probs, taxa))
            probs /= probs.sum()
            if len(taxa) > 0:
                classifier[seq] = taxa_with_confidence, probs
                break
            # No taxon confident enough yet: collapse one more rank.
            weighted = [(w, t) for t, w in new_weighted.items()]
    def stochastic_classifier(seq):
        # Sample one (confidence, taxon) pair for this exact sequence.
        a, p = classifier[seq]
        return a[numpy.random.choice(range(len(p)), p=p)]
    return stochastic_classifier
def classify_samples_stochastic(classifier, ref_seqs, test_samples):
    """Classify the test-sample sequences with a stochastic classifier.

    ``classifier`` is the closure returned by create_stochastic_classifier;
    it is invoked once per test sequence, and the sampled (confidence,
    taxon) pairs are assembled into a FeatureData[Taxonomy] artifact.
    """
    wanted_ids = set(test_samples.ids(axis="observation"))
    seqs = [s for s in ref_seqs.view(DNAIterator)
            if s.metadata["id"] in wanted_ids]
    logging.info(
        "Commencing classification of " + str(len(seqs)) + " sequences")
    seq_ids = []
    taxonomy = []
    confidence = []
    for seq in seqs:
        seq_ids.append(seq.metadata['id'])
        conf, taxon = classifier(str(seq))
        taxonomy.append(taxon)
        confidence.append(conf)
    result = DataFrame(
        {'Taxon': taxonomy, 'Confidence': confidence},
        index=seq_ids, columns=['Taxon', 'Confidence'])
    result.index.name = 'Feature ID'
    result = Artifact.import_data("FeatureData[Taxonomy]", result)
    logging.info("Got some classifications")
    return result
def create_perfect_classifier(
    ref_taxa,
    ref_seqs,
    confidence,
    weights=None,
):
    """Build a deterministic lookup classifier over reference sequences.

    For each distinct sequence string the taxa of all records sharing it
    are weighted (uniformly, or by the ``weights`` table) and collapsed
    one rank at a time until the heaviest candidate reaches
    ``confidence``; ties among equally heavy candidates are broken
    uniformly at random once, here at build time.

    Returns a dict mapping sequence string -> (confidence, taxon).
    """
    by_seq = defaultdict(dict)
    for seq in ref_seqs.view(DNAIterator):
        taxon = ref_taxa[seq.metadata['id']]
        if not weights:
            weight = 1
        elif weights.exists(taxon, 'observation'):
            weight = float(weights.get_value_by_ids(taxon, 'Weight'))
        else:
            # Taxon absent from the weights table: fall back to the
            # smallest observed weight.
            weight = float(weights.min())
        by_seq[str(seq)][taxon] = weight
    classifier = {}
    # NOTE: the loop variable below shadows the ``weights`` parameter,
    # which is no longer needed at this point.
    for seq, weights in by_seq.items():
        total = sum(weights.values())
        weighted = []
        for taxon, weight in weights.items():
            # '; ' sentinel so the first rsplit below strips nothing but
            # the sentinel, keeping the full taxonomy on round one.
            weighted.append((weight/total, taxon + '; '))
        while True:
            new_weighted = Counter()
            for weight, taxon in weighted:
                new_weighted[taxon.rsplit('; ', 1)[0]] += weight
            most_common = new_weighted.most_common()
            # Keep every candidate tied (within float tolerance) for first.
            most_common = [mc for mc in most_common
                           if numpy.isclose(most_common[0][1], mc[1])]
            mc = most_common[numpy.random.randint(len(most_common))]
            if mc[1] >= confidence:
                classifier[seq] = mc[1], mc[0]
                break
            # Not confident enough: collapse one more rank and retry.
            weighted = [(w, t) for t, w in new_weighted.items()]
    return classifier
def classify_samples_perfect(classifier, ref_seqs, test_samples):
    """Classify the test-sample sequences with a perfect lookup classifier.

    ``classifier`` is the dict built by create_perfect_classifier; each
    test sequence string is looked up directly and the resulting
    (confidence, taxon) pairs become a FeatureData[Taxonomy] artifact.
    """
    wanted_ids = set(test_samples.ids(axis="observation"))
    seqs = [s for s in ref_seqs.view(DNAIterator)
            if s.metadata["id"] in wanted_ids]
    logging.info(
        "Commencing classification of " + str(len(seqs)) + " sequences")
    seq_ids = []
    taxonomy = []
    confidence = []
    for seq in seqs:
        seq_ids.append(seq.metadata['id'])
        conf, taxon = classifier[str(seq)]
        taxonomy.append(taxon)
        confidence.append(conf)
    result = DataFrame(
        {'Taxon': taxonomy, 'Confidence': confidence},
        index=seq_ids, columns=['Taxon', 'Confidence'])
    result.index.name = 'Feature ID'
    result = Artifact.import_data("FeatureData[Taxonomy]", result)
    logging.info("Got some classifications")
    return result
def save_expected(results_dir, test_samples, expected, train_taxa):
    """Persist the expected taxonomy and abundances for each test sample.

    ``expected`` (feature id -> taxon) is truncated IN PLACE so that every
    label is the deepest prefix occurring in ``train_taxa`` — i.e. a label
    a classifier trained on those taxa could actually produce.  One
    FeatureData[Taxonomy] artifact per sample id is written under
    ``<results_dir>/expected`` and one FeatureTable[Frequency] artifact
    under ``<results_dir>/abundance``.
    """
    # Collect every taxonomic prefix present in the training taxonomy.
    known_taxa = set()
    for taxon in set(train_taxa.view(DataFrame)["Taxon"].values):
        while ";" in taxon:
            known_taxa.add(taxon)
            taxon, _ = taxon.rsplit(";", 1)
        known_taxa.add(taxon)
    # Trim each expected label until it matches a known prefix.
    for sid in expected:
        taxon = expected[sid]
        while taxon not in known_taxa:
            taxon, _ = taxon.rsplit(";", 1)
        expected[sid] = taxon
    expected_dir = join(results_dir, "expected")
    if not os.path.exists(expected_dir):
        os.mkdir(expected_dir)
    abundance_dir = join(results_dir, "abundance")
    if not os.path.exists(abundance_dir):
        os.mkdir(abundance_dir)
    for sample_id in test_samples.ids():
        sample = extract_sample([sample_id], test_samples)
        ids = sample.ids(axis="observation")
        taxa = [expected[s] for s in ids]
        df = DataFrame({"Taxon": taxa}, index=ids, columns=["Taxon"])
        df.index.name = "Feature ID"
        Artifact.import_data("FeatureData[Taxonomy]", df).save(
            join(expected_dir, sample_id + ".qza")
        )
        # One-row table of this sample's per-feature frequencies.
        df = DataFrame(
            dict(zip(ids, sample.data(sample_id))),
            index=["Frequency"],
            columns=ids,
        )
        Artifact.import_data("FeatureTable[Frequency]", df).save(
            join(abundance_dir, sample_id + ".qza")
        )
def load_simulated_samples(fold, results_dir):
    """Reconstruct a presence/absence test table from saved expectations.

    Reads the fold's test sample ids, then for each sample loads its saved
    expected-taxonomy artifact and marks each of its features present.
    Returns a biom Table of ones (observation x sample).
    """
    with open(join(fold, "sample_test.json")) as fp:
        sample_ids = json.load(fp)
    expected_dir = join(results_dir, "expected")
    membership = defaultdict(set)
    for sid in sample_ids:
        art = Artifact.load(join(expected_dir, sid + ".qza"))
        for obs_id in art.view(DataFrame).index:
            membership[obs_id].add(sid)
    obs_ids = list(membership)
    sample_index = {s: i for i, s in enumerate(sample_ids)}
    obs_index = {o: i for i, o in enumerate(obs_ids)}
    data = dok_matrix((len(obs_ids), len(sample_ids)))
    for obs_id, sids in membership.items():
        for sid in sids:
            data[obs_index[obs_id], sample_index[sid]] = 1
    return Table(data, obs_ids, sample_ids)
def simulate_samples(
    taxonomy_samples, fold, taxon_defaults, ref_taxa, ref_seqs
):
    """Simulate per-sequence test samples for one cross-validation fold.

    Loads the fold's test sample and sequence ids, remaps each observed
    taxon (via ``taxon_defaults``) to a taxon actually present among the
    fold's test sequences, then spreads each taxon's abundances as evenly
    as possible over the reference sequences bearing that taxon.

    Returns ``(test_samples, expected)`` where ``test_samples`` is a biom
    Table keyed by sequence id and ``expected`` maps each sequence id to
    its true taxon.
    """
    with open(join(fold, "sample_test.json")) as fp:
        test_samples = json.load(fp)
    test_samples = extract_sample(test_samples, taxonomy_samples)
    ref_taxa, _ = load_references(ref_taxa, ref_seqs)
    with open(join(fold, "seq_test.json")) as fp:
        test_seqs = json.load(fp)
    test_taxa = {ref_taxa[sid] for sid in test_seqs}
    # Single-element lists so the nested collapse() below can mutate them.
    hits = [0]
    direct_remaps = [0]
    indirect_remaps = [0]
    def collapse(taxon, _):
        # Resolve a taxon to itself or to the closest default present in
        # the fold's test taxa, counting how each resolution happened.
        if taxon in test_taxa:
            hits[0] += 1
            return taxon
        if taxon_defaults[taxon][0] in test_taxa:
            direct_remaps[0] += 1
            return taxon_defaults[taxon][0]
        for try_taxon in taxon_defaults[taxon][1:]:
            if try_taxon in test_taxa:
                indirect_remaps[0] += 1
                return try_taxon
    test_samples = test_samples.collapse(
        collapse, norm=False, axis="observation"
    )
    logging.info("Test taxon remaps")
    logging.info(str(hits[0]) + " hits")
    logging.info(str(direct_remaps[0]) + " direct remaps")
    logging.info(str(indirect_remaps[0]) + " indirect remaps")
    samples = []
    obs_ids = []
    expected = []
    # taxon -> test sequence ids bearing that taxon
    taxa_ref = defaultdict(list)
    for sid, taxon in ref_taxa.items():
        if sid in test_seqs:
            taxa_ref[taxon].append(sid)
    for abundances, taxon, _ in test_samples.iter(axis="observation"):
        taxa = taxa_ref[taxon]
        n_taxa = len(taxa)
        obs_ids.extend(taxa)
        expected.extend(ref_taxa[sid] for sid in taxa)
        # Split each sample's abundance evenly across the taxon's
        # sequences, then hand out the remainders round-robin so the
        # per-sample totals are preserved exactly (checked by the assert).
        taxa_samples = numpy.vstack([abundances // n_taxa] * n_taxa)
        # magic
        taxa = cycle(range(n_taxa))
        for i, r in enumerate(abundances % n_taxa):
            for t, _ in zip(taxa, range(int(r))):
                taxa_samples[t, i] += 1
        assert (taxa_samples.sum(axis=0) == abundances).all()
        samples.append(taxa_samples)
    test_samples = Table(numpy.vstack(samples), obs_ids, test_samples.ids())
    # Drop sequences that received zero total abundance.
    test_samples.filter(
        lambda v, _, __: v.sum() > 1e-9, axis="observation", inplace=True
    )
    return (test_samples, dict(zip(obs_ids, expected)))
def get_train_artifacts(
    taxonomy_samples, fold, taxon_defaults, ref_taxa, ref_seqs, weights=None
):
    """Assemble the training artifacts for one cross-validation fold.

    Builds the training sample table (from the fold's sample ids, or from
    the precomputed ``weights`` table when supplied), remaps its taxa onto
    the fold's training taxa, and derives class weights via
    ``clawback.methods.generate_class_weights``.

    Returns ``(train_taxonomy, train_seqs_artifact, ref_seqs_artifact,
    class_weight)``, all QIIME 2 artifacts.
    """
    if weights is None:
        with open(join(fold, "sample_train.json")) as fp:
            train_samples = json.load(fp)
        train_samples = extract_sample(train_samples, taxonomy_samples)
    else:
        train_samples = weights.view(Table)
    ref_taxa, ref_seqs = load_references(ref_taxa, ref_seqs)
    with open(join(fold, "seq_train.json")) as fp:
        train_seqs = json.load(fp)
    train_taxa = {ref_taxa[sid] for sid in train_seqs}
    # Single-element lists so the nested collapse() below can mutate them.
    hits = [0]
    direct_remaps = [0]
    indirect_remaps = [0]
    def collapse(taxon, _):
        # Resolve a taxon to itself or to the closest default present in
        # the fold's training taxa, counting how each resolution happened.
        if taxon in train_taxa:
            hits[0] += 1
            return taxon
        if taxon_defaults[taxon][0] in train_taxa:
            direct_remaps[0] += 1
            return taxon_defaults[taxon][0]
        for try_taxon in taxon_defaults[taxon][1:]:
            if try_taxon in train_taxa:
                indirect_remaps[0] += 1
                return try_taxon
    train_samples = train_samples.collapse(
        collapse, axis="observation", norm=False
    )
    logging.info("Train taxon remaps")
    logging.info(str(hits[0]) + " hits")
    logging.info(str(direct_remaps[0]) + " direct remaps")
    logging.info(str(indirect_remaps[0]) + " indirect remaps")
    train_samples = Artifact.import_data(
        "FeatureTable[Frequency]", train_samples
    )
    train_taxa = list(train_taxa)
    # Identity taxonomy: each collapsed-table taxon labels itself.
    eye_taxonomy = DataFrame(
        {"Taxon": train_taxa}, index=train_taxa, columns=["Taxon"]
    )
    eye_taxonomy.index.name = "Feature ID"
    eye_taxonomy = Artifact.import_data("FeatureData[Taxonomy]", eye_taxonomy)
    train_taxa = [ref_taxa[sid] for sid in train_seqs]
    train_taxonomy = DataFrame(
        {"Taxon": train_taxa}, index=train_seqs, columns=["Taxon"]
    )
    train_taxonomy.index.name = "Feature ID"
    train_taxonomy = Artifact.import_data(
        "FeatureData[Taxonomy]", train_taxonomy
    )
    train_iter = DNAIterator(
        s for s in ref_seqs if s.metadata["id"] in train_seqs
    )
    train_art = Artifact.import_data("FeatureData[Sequence]", train_iter)
    # When a weights table was supplied, give no extra mass to unobserved
    # taxa; otherwise use a small floor weight.
    unobserved_weight = 1e-6 if weights is None else 0.0
    weights = clawback.methods.generate_class_weights(
        train_taxonomy,
        train_art,
        train_samples,
        eye_taxonomy,
        unobserved_weight=unobserved_weight,
    )
    ref_seqs = Artifact.import_data(
        "FeatureData[Sequence]", DNAIterator(ref_seqs)
    )
    return train_taxonomy, train_art, ref_seqs, weights.class_weight
def map_svs_to_taxa(empirical_samples, ref_taxa, ref_seqs, n_jobs):
    """Collapse a sample table's sequence variants to taxa.

    Fits a naive Bayes feature classifier on the full reference,
    classifies the sequence variants found in ``empirical_samples``, and
    collapses the table's observations to the assigned taxa.

    Returns the collapsed Table.
    """
    ref_taxa = Artifact.import_data(
        "FeatureData[Taxonomy]",
        ref_taxa,
        view_type="HeaderlessTSVTaxonomyFormat",
    )
    ref_seqs = Artifact.import_data("FeatureData[Sequence]", ref_seqs)
    classifier = feature_classifier.methods.fit_classifier_naive_bayes(
        ref_seqs,
        ref_taxa,
        classify__alpha=0.001,
        feat_ext__ngram_range="[7,7]",
    )
    classifier = classifier.classifier
    samples = Artifact.import_data(
        "FeatureTable[Frequency]", empirical_samples
    )
    svs = clawback.methods.sequence_variants_from_samples(samples)
    svs = svs.sequences
    # confidence=0.0 keeps every assignment, however weak.
    sv_taxa = feature_classifier.methods.classify_sklearn(
        svs, classifier, confidence=0.0, n_jobs=n_jobs
    ).classification
    sv_taxa = sv_taxa.view(DataFrame)["Taxon"]
    return samples.view(Table).collapse(
        lambda sid, _: sv_taxa[sid], axis="observation", norm=False
    )
def get_sequence_strata(k, ref_taxa, ref_seqs, n_jobs):
taxonomy, ref_seqs = load_references(ref_taxa, ref_seqs)
taxa_stats = Counter(taxonomy.values())
strata = {t: [t] for t in taxonomy.values() if taxa_stats[t] >= k}
kref = (s for s in ref_seqs if taxonomy[s.metadata["id"]] in strata)
ref_art = Artifact.import_data("FeatureData[Sequence]", DNAIterator(kref))
tax_art = Artifact.import_data(
"FeatureData[Taxonomy]",
ref_taxa,
view_type="HeaderlessTSVTaxonomyFormat",
)
classifier = feature_classifier.methods.fit_classifier_naive_bayes(
ref_art, tax_art, classify__alpha=0.001, feat_ext__ngram_range="[7,7]"
)
classifier = classifier.classifier
tiddlers = DNAIterator(
s for s in ref_seqs if taxonomy[s.metadata["id"]] not in strata
)
tid_art = Artifact.import_data("FeatureData[Sequence]", tiddlers)
tid_tax = feature_classifier.methods.classify_sklearn(
tid_art, classifier, confidence=0.0, n_jobs=n_jobs
)
tid_tax = tid_tax.classification.view(DataFrame)
stratum_votes = defaultdict(Counter)
for sid in tid_tax.index:
stratum_votes[taxonomy[sid]][tid_tax["Taxon"][sid]] += float(
tid_tax["Confidence"][sid]
)
taxon_defaults = {}
for taxon in stratum_votes:
most_common = stratum_votes[taxon].most_common()
merge_taxon, max_conf = most_common[0]
assert len(most_common) == 1 or most_common[1][1] != max_conf
taxon_defaults[taxon] = strata[merge_taxon]
strata[merge_taxon].append(taxon)
taxon_defaults.update(strata)
seq_ids = [s.metadata["id"] for s | |
The sound id to search by
Note: Can be found in the URL of the sound specific page or with other methods.
count: The number of posts to return
Note: seems to only support up to ~2,000
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
while len(response) < count:
if count < maxCount:
realCount = count
else:
realCount = maxCount
query = {
"secUid": "",
"musicID": str(id),
"count": str(realCount),
"cursor": offset,
"shareUid": "",
"language": language,
}
api_url = "{}api/music/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
try:
for t in res["items"]:
response.append(t)
except KeyError:
for t in res["itemList"]:
response.append(t)
if not res["hasMore"]:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response
realCount = count - len(response)
offset = res["cursor"]
return response[:count]
def get_music_object(self, id, **kwargs) -> dict:
"""Returns a music object for a specific sound id.
Parameters
----------
id: The sound id to get the object for
This can be found by using other methods.
"""
return self.getMusicObjectFull(id, **kwargs)["music"]
    def get_music_object_full(self, id, **kwargs):
        """Returns a music object for a specific sound id.

        Fetches the sound's public web page and extracts the embedded
        page-props JSON, rather than calling a JSON API endpoint.

        Parameters
        ----------
        id: The sound id to get the object for
            This can be found by using other methods.
        """
        (
            region,
            language,
            proxy,
            maxCount,
            did,
        ) = self.__process_kwargs__(kwargs)
        r = requests.get(
            "https://www.tiktok.com/music/-{}".format(id),
            headers={
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "authority": "www.tiktok.com",
                "Accept-Encoding": "gzip, deflate",
                "Connection": "keep-alive",
                "Host": "www.tiktok.com",
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
            },
            proxies=self.__format_proxy(kwargs.get("proxy", None)),
            cookies=self.get_cookies(**kwargs),
        )
        t = r.text
        # The music payload is embedded in a script tag on the page.
        j_raw = parse_script_tag_contents(t)
        return json.loads(j_raw)["props"]["pageProps"]["musicInfo"]
    def by_hashtag(self, hashtag, count=30, offset=0, **kwargs) -> dict:
        """Returns a dictionary listing TikToks with a specific hashtag.

        Parameters
        ----------
        hashtag: The hashtag to search by
            Without the # symbol
            A valid string is "funny"
        count: The number of posts to return
            Note: seems to only support up to ~2,000
        offset: The paging cursor to start from, optional
        """
        (
            region,
            language,
            proxy,
            maxCount,
            did,
        ) = self.__process_kwargs__(kwargs)
        kwargs["custom_did"] = did
        # Resolve the hashtag name to its numeric challenge id first.
        id = self.getHashtagObject(hashtag)["challengeInfo"]["challenge"]["id"]
        response = []
        required_count = count
        while len(response) < required_count:
            # Per-request page size is capped at maxCount.
            if count > maxCount:
                count = maxCount
            query = {
                "count": count,
                "challengeID": id,
                "type": 3,
                "secUid": "",
                "cursor": offset,
                "priority_region": "",
            }
            api_url = "{}api/challenge/item_list/?{}&{}".format(
                BASE_URL, self.__add_new_params__(), urlencode(query)
            )
            res = self.getData(url=api_url, **kwargs)
            for t in res["itemList"]:
                response.append(t)
            if not res["hasMore"]:
                logging.info("TikTok isn't sending more TikToks beyond this point.")
                return response
            # NOTE(review): the cursor advances by maxCount rather than by
            # the cursor returned in ``res`` — confirm this matches the
            # endpoint's paging contract.
            offset += maxCount
        return response[:required_count]
def get_hashtag_object(self, hashtag, **kwargs) -> dict:
"""Returns a hashtag object.
Parameters
----------
hashtag: The hashtag to search by
Without the # symbol
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {"name": hashtag, "isName": True, "lang": language}
api_url = "{}node/share/tag/{}?{}&{}".format(
BASE_URL, quote(hashtag), self.__add_new_params__(), urlencode(query)
)
data = self.getData(url=api_url, **kwargs)
if data["challengeInfo"].get("challenge") is None:
raise TikTokNotFoundError("Challenge {} does not exist".format(hashtag))
return data
    def get_recommended_tiktoks_by_video_id(
        self, id, count=30, minCursor=0, maxCursor=0, **kwargs
    ) -> dict:
        """Returns a dictionary listing recommended TikToks for a specific TikTok video.

        Parameters
        ----------
        id: The id of the video to get suggestions for
            Can be found using other methods
        count: The number of recommendations to return
        """
        (
            region,
            language,
            proxy,
            maxCount,
            did,
        ) = self.__process_kwargs__(kwargs)
        kwargs["custom_did"] = did
        response = []
        first = True
        while len(response) < count:
            # Request at most maxCount items per API call.
            if count < maxCount:
                realCount = count
            else:
                realCount = maxCount
            query = {
                "count": realCount,
                "id": 1,
                "secUid": "",
                "maxCursor": maxCursor,
                "minCursor": minCursor,
                "sourceType": 12,
                "appId": 1233,
                "region": region,
                "priority_region": region,
                "language": language,
            }
            api_url = "{}api/recommend/item_list/?{}&{}".format(
                BASE_URL, self.__add_new_params__(), urlencode(query)
            )
            res = self.getData(url=api_url, **kwargs)
            for t in res.get("items", []):
                response.append(t)
            # NOTE(review): hasMore is ignored on the first iteration —
            # presumably the first page can report it unreliably; confirm.
            if not res["hasMore"] and not first:
                logging.info("TikTok isn't sending more TikToks beyond this point.")
                return response[:count]
            realCount = count - len(response)
            maxCursor = res["maxCursor"]
            first = False
        return response[:count]
def get_tiktok_by_id(self, id, **kwargs) -> dict:
"""Returns a dictionary of a specific TikTok.
Parameters
----------
id: The id of the TikTok you want to get the object for
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
did = kwargs.get("custom_did", None)
query = {
"itemId": id,
"language": language,
}
api_url = "{}api/item/detail/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
return self.getData(url=api_url, **kwargs)
def get_tiktok_by_url(self, url, **kwargs) -> dict:
"""Returns a dictionary of a TikTok object by url.
Parameters
----------
url: The TikTok url you want to retrieve
This currently doesn't support the shortened TikTok
url links.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
custom_did = kwargs.get("custom_did", None)
if "@" in url and "/video/" in url:
post_id = url.split("/video/")[1].split("?")[0]
else:
raise Exception(
"URL format not supported. Below is an example of a supported url.\n"
"https://www.tiktok.com/@therock/video/6829267836783971589"
)
return self.getTikTokById(
post_id,
**kwargs,
)
def get_tiktok_by_html(self, url, **kwargs) -> dict:
"""This method retrieves a TikTok using the html
endpoints rather than the API based ones.
Parameters
----------
url: The url of the TikTok to get
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
r = requests.get(
url,
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"path": url.split("tiktok.com")[1],
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
proxies=self.__format_proxy(kwargs.get("proxy", None)),
cookies=self.get_cookies(**kwargs),
)
t = r.text
try:
j_raw = parse_script_tag_contents(t)
except IndexError:
if not t:
logging.error("TikTok response is empty")
else:
logging.error("TikTok response: \n " + t)
raise TikTokCaptchaError()
data = json.loads(j_raw)["props"]["pageProps"]
if data["serverCode"] == 404:
raise TikTokNotFoundError(
"TikTok with that url doesn't exist".format(username)
)
return data
def discover_hashtags(self, **kwargs) -> dict:
"""Discover page, consists challenges (hashtags)"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {"noUser": 1, "userCount": 30, "scene": 0}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
return self.getData(url=api_url, **kwargs)["body"][1]["exploreList"]
def discover_music(self, **kwargs) -> dict:
"""Discover page, consists of music"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {"noUser": 1, "userCount": 30, "scene": 0}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
return self.getData(url=api_url, **kwargs)["body"][2]["exploreList"]
def get_user_object(self, username, **kwargs) -> dict:
"""Gets a user object (dictionary)
Parameters
----------
username: The username of the user
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
return self.getUser(username, **kwargs)["userInfo"]["user"]
def get_user(self, username, **kwargs) -> dict:
"""Gets the full exposed user object
Parameters
----------
username: The username of the user
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
r = requests.get(
"https://tiktok.com/@{}?lang=en".format(quote(username)),
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"path": "/@{}".<EMAIL>(quote(username)),
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
proxies=self.__format_proxy(kwargs.get("proxy", None)),
cookies=self.get_cookies(**kwargs),
)
t = r.text
try:
j_raw = parse_script_tag_contents(t)
except IndexError:
if not t:
logging.error("Tiktok response is empty")
else:
logging.error("Tiktok response: \n " + t)
raise TikTokCaptchaError()
user = json.loads(j_raw)["props"]["pageProps"]
if user["serverCode"] == 404:
raise TikTokNotFoundError(
"TikTok user with username {} does not exist".format(username)
)
return user
def get_suggested_users_by_id(
self, userId="6745191554350760966", count=30, **kwargs
) -> list:
"""Returns suggested users given a different TikTok user.
Parameters
----------
userId: The id of the user to get suggestions for
count: The amount of users to return, optional
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {
"noUser": 0,
"pageId": userId,
"userId": userId,
"userCount": count,
"scene": 15,
}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = []
for x in self.getData(url=api_url, **kwargs)["body"][0]["exploreList"]:
res.append(x["cardItem"])
return res[:count]
def get_suggested_users_by_id_crawler(
self, count=30, startingId="6745191554350760966", **kwargs
) -> list:
"""Crawls for listing of all user objects it can find.
Parameters
----------
count: The amount of users to crawl for
startingId: The ID of a TikTok user to start at, optional
Optional but uses a static one to start, so you may get more
unique results with setting your own.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
users = []
unusedIDS = [startingId]
while len(users) < count:
userId = random.choice(unusedIDS)
newUsers = self.getSuggestedUsersbyID(userId=userId, **kwargs)
unusedIDS.remove(userId)
for user in newUsers:
if user not in users:
users.append(user)
unusedIDS.append(user["id"])
return users[:count]
def get_suggested_hashtags_by_id(
| |
<filename>cgmm.py
import torch
from pydgn.model.interface import ModelInterface
from util import compute_bigram, compute_unigram
from torch_geometric.nn import global_mean_pool, global_add_pool
from torch_scatter import scatter_add, scatter_max
class CGMM(ModelInterface):
    def __init__(self, dim_node_features, dim_edge_features, dim_target, readout_class, config):
        """Configure one CGMM layer from ``config``.

        Reads the layer depth, latent-state sizes, and embedding options
        from the config dict, instantiates the readout, and selects the
        transition distribution: a BaseTransition at depth 1, otherwise a
        CGMMTransition conditioned on previous layers' outputs.
        """
        super().__init__(dim_node_features, dim_edge_features, dim_target, readout_class, config)
        self.device = None
        self.readout_class = readout_class
        self.is_first_layer = config['depth'] == 1
        self.depth = config['depth']
        self.training = False
        self.return_node_embeddings = False
        self.K = dim_node_features
        self.Y = dim_target
        # L: how many previous layers' outputs this layer conditions on.
        self.L = len(config['prev_outputs_to_consider'])
        self.A = config['A']
        self.C = config['C']
        self.C2 = config['C'] + 1
        # CS is only present for graph-classification configurations; its
        # presence toggles the graph-level E-step below.
        self.CS = config.get('CS', None)
        self.is_graph_classification = self.CS is not None
        # self.add_self_arc = config['self_arc'] if 'self_arc' in config else False
        self.use_continuous_states = config['infer_with_posterior']
        self.unibigram = config['unibigram']
        self.aggregation = config['aggregation']
        self.readout = readout_class(dim_node_features, dim_edge_features,
                                     dim_target, config)
        if self.is_first_layer:
            self.transition = BaseTransition(self.C)
        else:
            self.transition = CGMMTransition(self.C, self.A,
                                             self.C2, self.L)
        self.init_accumulators()
    def init_accumulators(self):
        """Reset the EM sufficient-statistics accumulators of both submodules."""
        self.readout.init_accumulators()
        self.transition.init_accumulators()
        # Do not delete this!
        # Re-apply device placement after resetting, since the freshly
        # created accumulators start on the default device.
        if self.device:  # set by to() method
            self.to(self.device)
    def to(self, device):
        """Move the model to ``device`` and remember it for later resets.

        The stored device lets init_accumulators() re-apply placement.
        NOTE(review): unlike torch.nn.Module.to, this does not return self.
        """
        super().to(device)
        self.device = device
    def train(self):
        """Switch readout and transition to training mode.

        NOTE(review): unlike torch.nn.Module.train, takes no ``mode`` flag.
        """
        self.readout.train()
        self.transition.train()
        self.training = True
    def eval(self):
        """Switch readout and transition to evaluation mode."""
        self.readout.eval()
        self.transition.eval()
        self.training = False
def forward(self, data):
extra = None
if not self.is_first_layer:
data, extra = data[0], data[1]
return self.e_step(data, extra)
    def e_step(self, data, extra=None):
        """Perform the layer's E-step on a batch of graphs.

        Computes the transition posterior, combines it with the readout's
        posterior (node- or graph-level depending on configuration),
        accumulates M-step statistics while training, and, when
        ``self.return_node_embeddings`` is set, also builds unigram (and
        optionally bigram) embeddings plus the statistics for the next
        layer.

        Returns (likely_labels, embeddings_or_None,
        complete_log_likelihood, true_log_likelihood, num_nodes).
        """
        x, y, batch = data.x, data.y, data.batch
        prev_stats = None if self.is_first_layer else extra.vo_outs
        if prev_stats is not None:
            prev_stats.to(self.device)
        # --------------------------- FORWARD PASS --------------------------- #
        # t = time.time()
        # --- TRANSITION contribution
        if self.is_first_layer:
            # p_Q_given_obs --> ?xC
            p_Q_given_obs = self.transition.e_step(x)
            transition_posterior = p_Q_given_obs
            rightmost_term = p_Q_given_obs
        else:
            # p_Q_given_obs --> ?xC / transition_posterior --> ?xLxAxCxC2
            p_Q_given_obs, transition_posterior, rightmost_term = self.transition.e_step(x, prev_stats)
        # assert torch.allclose(p_Q_given_obs.sum(1), torch.tensor([1.]).to(self.device)), p_Q_given_obs.sum(1)
        # print(f"Transition E-step time: {time.time()-t}"); t = time.time()
        # --- READOUT contribution
        # true_log_likelihood --> ?x1 / readout_posterior --> ?xCSxCN or ?xCN
        true_log_likelihood, readout_posterior, emission_target = self.readout.e_step(p_Q_given_obs, x, y, batch)
        # print(f"Readout E-step time: {time.time()-t}"); t = time.time()
        # likely_labels --> ? x Y
        likely_labels = self.readout.infer(p_Q_given_obs, x, batch)
        # print(f"Readout inference time: {time.time()-t}"); t = time.time()
        # -------------------------------------------------------------------- #
        if not self.is_graph_classification:
            complete_log_likelihood, eui = self._e_step_node(x, y, p_Q_given_obs,
                                                             transition_posterior, rightmost_term,
                                                             readout_posterior, emission_target,
                                                             batch)
        else:
            complete_log_likelihood, eui = self._e_step_graph(x, y, p_Q_given_obs,
                                                              transition_posterior, rightmost_term,
                                                              readout_posterior, emission_target,
                                                              batch)
        # print(f"Posterior E-step time: {time.time()-t}"); t = time.time()
        num_nodes = x.shape[0]
        # CGMM uses the true posterior (over node attributes) as it is unsupervised!
        # Different from IO version
        if self.return_node_embeddings:
            # print("Computing intermediate outputs")
            assert not self.training
            statistics_batch = self._compute_statistics(eui, data, self.device)
            node_unigram = compute_unigram(eui, self.use_continuous_states)
            graph_unigram = self._get_aggregation_fun()(node_unigram, batch)
            if self.unibigram:
                node_bigram = compute_bigram(eui.float(), data.edge_index, batch,
                                             self.use_continuous_states)
                graph_bigram = self._get_aggregation_fun()(node_bigram, batch)
                node_embeddings_batch = torch.cat((node_unigram, node_bigram), dim=1)
                graph_embeddings_batch = torch.cat((graph_unigram, graph_bigram), dim=1)
            else:
                node_embeddings_batch = node_unigram
                graph_embeddings_batch = graph_unigram
            # to save time during debug
            embeddings = (None, None, graph_embeddings_batch, statistics_batch, None, None)
        else:
            embeddings = None
        return likely_labels, embeddings, complete_log_likelihood, \
            true_log_likelihood, num_nodes
    def _e_step_graph(self, x, y, p_Q_given_obs, transition_posterior,
                      rightmost_term, readout_posterior, emission_target, batch):
        """E-step posterior and complete log-likelihood, graph-level readout.

        The graph-level readout posterior is replicated across each graph's
        nodes, combined with the transition posterior, and normalized per
        graph; the resulting marginals feed the readout and transition
        M-step accumulators while training.

        Returns (complete_log_likelihood, eui) where eui is the per-node
        posterior over the C latent states.
        """
        # batch (i.e., replicate) graph readout posterior for all nodes
        b_readout_posterior = readout_posterior[batch]  # ?nxCSxCN
        if self.is_first_layer:
            # ----------------------------- Posterior ---------------------------- #
            # expand
            exp_readout_posterior = b_readout_posterior.reshape((-1, self.CS,
                                                                 self.C))
            # expand
            exp_transition_posterior = transition_posterior.unsqueeze(1)
            # batch graph sizes + expand
            b_graph_sizes = scatter_add(torch.ones_like(batch).to(self.device),
                                        batch)[batch].reshape([-1, 1, 1])
            # Divide by graph size so each graph contributes equally before
            # per-graph normalization below.
            unnorm_posterior_estimate = torch.div(torch.mul(exp_readout_posterior,
                                                            exp_transition_posterior),
                                                  b_graph_sizes)
            Z = global_add_pool(unnorm_posterior_estimate.sum((1, 2), keepdim=True), batch)
            # Guard against division by zero for empty normalizers.
            Z[Z == 0.] = 1.
            esui = unnorm_posterior_estimate / (Z[batch])  # --> ?n x CS x CN
            eui = esui.sum(1)  # ?n x CN
            if self.training:
                # Update the accumulators (also useful for minibatch training)
                self.readout._m_step(x, y, esui, batch)
                self.transition._m_step(x, y, eui)
            # -------------------------------------------------------------------- #
            # ---------------------- Complete Log Likelihood --------------------- #
            complete_log_likelihood_readout = self.readout.complete_log_likelihood(esui, emission_target, batch)
            complete_log_likelihood_transition = self.transition.complete_log_likelihood(eui, p_Q_given_obs)
            complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
            # -------------------------------------------------------------------- #
        else:
            # ----------------------------- Posterior ---------------------------- #
            # expand
            exp_readout_posterior = b_readout_posterior.reshape((-1, self.CS,
                                                                 1, 1,
                                                                 self.C, 1))
            # expand
            exp_transition_posterior = transition_posterior.unsqueeze(1)
            # batch graph sizes + expand
            b_graph_sizes = scatter_add(torch.ones_like(batch).to(self.device),
                                        batch)[batch].reshape([-1, 1, 1, 1, 1, 1])
            unnorm_posterior_estimate = torch.div(torch.mul(exp_readout_posterior,
                                                            exp_transition_posterior),
                                                  b_graph_sizes)
            Z = global_add_pool(unnorm_posterior_estimate.sum((1, 2, 3, 4, 5), keepdim=True), batch)
            # Guard against division by zero for empty normalizers.
            Z[Z == 0.] = 1.
            esuilaj = unnorm_posterior_estimate / (Z[batch])  # --> ?n x CS x L x A x C x C2
            euilaj = esuilaj.sum(1)  # Marginalize over CS --> transition M-step
            euila = euilaj.sum(4)  # ?n x L x A x C
            euil = euila.sum(2)  # ?n x L x C
            esui = esuilaj.sum((2, 3, 5))  # Marginalize over L,A,C2 --> readout M-step
            eui = euil.sum(1)  # ?n x C
            if self.training:
                # Update the accumulators (also useful for minibatch training)
                self.readout._m_step(x, y, esui, batch)
                self.transition._m_step(x, y, euilaj, euila, euil)
            # -------------------------------------------------------------------- #
            # ---------------------- Complete Log Likelihood --------------------- #
            complete_log_likelihood_readout = self.readout.complete_log_likelihood(esui, emission_target, batch)
            complete_log_likelihood_transition = self.transition.complete_log_likelihood(euilaj, euila, euil,
                                                                                         rightmost_term)
            complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
            # -------------------------------------------------------------------- #
        return complete_log_likelihood, eui
def _e_step_node(self, x, y, p_Q_given_obs, transition_posterior,
                 rightmost_term, readout_posterior, emission_target, batch):
    """E-step of the EM procedure for node-level tasks.

    Normalizes the joint (readout x transition) posterior, accumulates the
    sufficient statistics needed by the M-step while in training mode, and
    computes the layer's complete log likelihood.

    Parameters
    ----------
    x, y:
        Node inputs/targets; forwarded untouched to the readout/transition
        ``_m_step`` accumulators.
    p_Q_given_obs:
        Term consumed by the first layer's transition complete log likelihood.
    transition_posterior:
        Unnormalized transition posterior. First layer: ``? x CN``;
        deeper layers: ``?n x L x A x C x C2`` (per the comments below).
    rightmost_term:
        Extra term of the deeper layers' transition complete log likelihood.
    readout_posterior:
        Unnormalized readout posterior; reshaped for deeper layers so it
        broadcasts against the transition posterior.
    emission_target:
        Targets for the readout complete log likelihood.
    batch:
        Node-to-graph assignment vector.

    Returns
    -------
    tuple
        ``(complete_log_likelihood, eui)`` where ``eui`` is the per-node
        posterior over the C latent states.
    """
    if self.is_first_layer:
        # ----------------------------- Posterior ---------------------------- #
        unnorm_posterior_estimate = readout_posterior * transition_posterior
        Z = unnorm_posterior_estimate.sum(1, keepdim=True)
        # Avoid 0/0 for nodes whose posterior mass is entirely zero.
        Z[Z == 0.] = 1.
        eui = unnorm_posterior_estimate / Z  # --> ? x CN
        if self.training:
            # Update the accumulators (also useful for minibatch training)
            self.readout._m_step(x, y, eui, batch)
            self.transition._m_step(x, y, eui)
        # -------------------------------------------------------------------- #
        # ---------------------- Complete Log Likelihood --------------------- #
        complete_log_likelihood_readout = self.readout.complete_log_likelihood(eui, emission_target, batch)
        complete_log_likelihood_transition = self.transition.complete_log_likelihood(eui, p_Q_given_obs)
        complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
        # -------------------------------------------------------------------- #
    else:
        # ----------------------------- Posterior ---------------------------- #
        # Expand the readout posterior so it broadcasts against the
        # (?n x L x A x C x C2) transition posterior.
        exp_readout_posterior = readout_posterior.reshape((-1, 1, 1, self.C, 1))
        unnorm_posterior_estimate = torch.mul(exp_readout_posterior,
                                              transition_posterior)
        Z = unnorm_posterior_estimate.sum((1, 2, 3, 4), keepdim=True)
        # Avoid 0/0 for nodes whose posterior mass is entirely zero.
        Z[Z == 0.] = 1.
        euilaj = unnorm_posterior_estimate / Z  # --> ?n x L x A x C x C2
        euila = euilaj.sum(4)  # marginalize over C2 --> ?n x L x A x C
        euil = euila.sum(2)  # marginalize over A  --> ?n x L x C
        eui = euil.sum(1)  # marginalize over L  --> ?n x C
        if self.training:
            # Update the accumulators (also useful for minibatch training)
            self.readout._m_step(x, y, eui, batch)
            self.transition._m_step(x, y, euilaj, euila, euil)
        # -------------------------------------------------------------------- #
        # ---------------------- Complete Log Likelihood --------------------- #
        complete_log_likelihood_readout = self.readout.complete_log_likelihood(eui, emission_target, batch)
        complete_log_likelihood_transition = self.transition.complete_log_likelihood(euilaj, euila, euil,
                                                                                     rightmost_term)
        complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
        # -------------------------------------------------------------------- #
    # assert torch.allclose(eui.sum(1), torch.tensor([1.]).to(self.device)), eui.sum(1)[eui.sum(1) != 1.]
    return complete_log_likelihood, eui
def m_step(self):
    """Run the M-step of both trainable components, then reset accumulators.

    The readout is updated before the transition, mirroring the order in
    which their statistics were accumulated during the E-step.
    """
    for component in (self.readout, self.transition):
        component.m_step()
    self.init_accumulators()
def stopping_criterion(self, depth, max_layers, train_loss, train_score, val_loss, val_score,
                       dict_per_layer, layer_config, logger=None):
    """Decide whether to stop growing the model.

    Only the depth is considered: training stops exactly when the current
    depth equals ``max_layers``. All other arguments are accepted for
    interface compatibility and ignored.
    """
    reached_max_depth = depth == max_layers
    return reached_max_depth
def _compute_statistics(self, posteriors, data, device):
    """Aggregate neighboring nodes' posteriors into per-node statistics.

    Builds a ``(num_nodes, self.A + 1, posteriors.shape[1] + 1)`` tensor
    where slot ``[u, a, :]`` accumulates the posteriors of u's incoming
    neighbors connected through an arc of label ``a`` (the adjacency is
    transposed before the sparse matmul, so rows index destinations).
    The extra arc slot ``self.A`` and the extra state column are reserved
    for the special "bottom state" edge filled at the end.

    # assumes `posteriors` is (num_nodes, C2) with rows summing to 1,
    # so the floored mass per node approximates its in-degree — TODO confirm
    """
    statistics = torch.full((posteriors.shape[0], self.A + 1, posteriors.shape[1] + 1), 0., dtype=torch.float32).to(
        device)
    # NOTE(review): srcs/dsts are unpacked but never used below.
    srcs, dsts = data.edge_index
    if self.A == 1:
        # Single arc label: one sparse adjacency, transposed so that
        # row u sums the posteriors of u's source neighbors.
        sparse_adj_matr = torch.sparse_coo_tensor(data.edge_index, \
                                                  torch.ones(data.edge_index.shape[1], dtype=posteriors.dtype).to(
                                                      device), \
                                                  torch.Size([posteriors.shape[0],
                                                              posteriors.shape[0]])).to(device).transpose(0, 1)
        statistics[:, 0, :-1] = torch.sparse.mm(sparse_adj_matr, posteriors)
    else:
        # One sparse (0/1) adjacency per discrete arc label.
        for arc_label in range(self.A):
            sparse_label_adj_matr = torch.sparse_coo_tensor(data.edge_index, \
                                                            (data.edge_attr == arc_label).to(device).float(), \
                                                            torch.Size([posteriors.shape[0],
                                                                        posteriors.shape[0]])).to(device).transpose(
                0, 1)
            statistics[:, arc_label, :-1] = torch.sparse.mm(sparse_label_adj_matr, posteriors)
    # Deal with nodes with degree 0: add a single fake neighbor with uniform posterior
    degrees = statistics[:, :, :-1].sum(dim=[1, 2]).floor()
    statistics[degrees == 0., :, :] = 1. / self.C2
    '''
    if self.add_self_arc:
        statistics[:, self.A, :-1] += posteriors
    '''
    # use self.A+1 as special edge for bottom states (all in self.C2-1)
    # Per-graph maximum degree; zeros clamped to 1 to avoid division by zero.
    max_arieties, _ = self._compute_max_ariety(degrees.int().to(self.device), data.batch)
    max_arieties[max_arieties == 0] = 1
    statistics[:, self.A, self.C] += degrees / max_arieties[data.batch].float()
    return statistics
def _compute_sizes(self, batch, device):
    """Return the number of nodes belonging to each graph in the batch."""
    node_ones = torch.ones(len(batch), dtype=torch.int).to(device)
    return scatter_add(node_ones, batch)
def _compute_max_ariety(self, degrees, batch):
    # Per-graph maximum node degree. Delegates to the external `scatter_max`,
    # which returns a (values, argmax) pair — callers in this file unpack it
    # and use only the values.
    return scatter_max(degrees, batch)
def _get_aggregation_fun(self):
if self.aggregation == 'mean':
aggregate = global_mean_pool
elif self.aggregation == 'sum':
aggregate = global_add_pool
return aggregate
class CGMMTransition(torch.nn.Module):
def __init__(self, c, a, c2, l):
super().__init__()
self.device = None
self.eps = 1e-8 # Laplace smoothing
self.C = c
self.orig_A = a
self.A = a + 1 # bottom state connected with a special arc
self.C2 = c2
self.L = l
self.layerS = torch.nn.Parameter(torch.nn.init.uniform_(torch.empty(self.L, dtype=torch.float32)),
requires_grad=False)
self.arcS = torch.nn.Parameter(torch.zeros((self.L, self.A), dtype=torch.float32), requires_grad=False)
self.transition = torch.nn.Parameter(torch.empty([self.L, self.A, self.C, self.C2], dtype=torch.float32),
requires_grad=False)
self.layerS /= self.layerS.sum() # inplace
for layer in range(self.L):
self.arcS[layer, :] = torch.nn.init.uniform_(self.arcS[layer, :])
self.arcS[layer, :] /= self.arcS[layer, :].sum()
for arc in range(self.A):
for j in range(self.C2):
tr = torch.nn.init.uniform_(torch.empty(self.C))
self.transition[layer, arc, :, j] = tr / tr.sum()
# These are variables where I accumulate intermediate minibatches' results
# These are needed by the M-step update equations at the end of an epoch
self.layerS_numerator = | |
import matplotlib.pyplot as plt
import jax.numpy as np
class LikelihoodFreeInference:
"""Base class (some functionality) for likelihood free inference methods
Mostly used for plotting
Parameters
----------
gridsize : list
The number of grid points to evaluate the marginal distribution on for
each parameter
ranges : list
A list of arrays containing the gridpoints for the marginal
distribution for each parameter
marginal : list of lists
A list of rows and columns of marginal distributions of a corner plot
Methods
-------
prior
A prior distribution which can be evaluated and sampled from (should
also contain a ``low`` and a ``high`` attribute with appropriate
ranges)
Todo
----
pytests need writing
"""
def __init__(self, prior, gridsize=100, marginals=None):
    """Constructor method

    Validates the prior (event shape plus `low` and `high` range
    attributes), builds the evaluation grid and stores the marginals.

    Bug fix: the second attribute check used to re-test ``low`` (a
    copy-paste of the first check), so a prior missing ``high`` slipped
    through validation and failed later when building the ranges.

    Parameters
    ----------
    prior: fn
        A prior distribution which can be evaluated and sampled from
        (should also contain a ``low`` and a ``high`` attribute with
        appropriate ranges)
    gridsize : int or list, default=100
        The number of grid points to evaluate the marginal distribution on
        for every parameter (int) or each parameter (list)
    marginals : float(n_targets, gridsize*n_params) or None, default=None
        The full distribution evaluated on a grid to put into marginal list

    Raises
    ------
    ValueError
        If the prior has no ``event_shape``, ``low`` or ``high`` attribute.
    """
    self.prior = prior
    try:
        self.n_params = self.prior.event_shape[0]
    except Exception:
        raise ValueError(
            "`prior` has no event_shape - this should be `n_params`")
    if not hasattr(self.prior, "low"):
        raise ValueError(
            "`prior` must have (or be given by assignment) a `low` " +
            "attribute describing the minimum allowed value for each " +
            "parameter value")
    if not hasattr(self.prior, "high"):
        raise ValueError(
            "`prior` must have (or be given by assignment) a `high` " +
            "attribute describing the maximum allowed value for each " +
            "parameter value")
    self.gridsize = self.get_gridsize(gridsize, self.n_params)
    # One evenly spaced grid per parameter over the prior's support.
    self.ranges = [
        np.linspace(
            self.prior.low[i],
            self.prior.high[i],
            self.gridsize[i])
        for i in range(self.n_params)]
    self.marginals = self.put_marginals(marginals)
def get_gridsize(self, gridsize, size):
    """Expand a gridpoint specification into one entry per parameter.

    Parameters
    ----------
    gridsize : int or list
        The number of grid points to evaluate the marginal distribution on
        for every parameter (int) or each parameter (list)
    size : int
        Number of parameters, i.e. the required length of the result

    Returns
    -------
    list:
        The number of gridpoints to evaluate marginals for each parameter

    Raises
    ------
    ValueError
        If a list of the wrong length is passed
    TypeError
        If gridsize is neither an int nor a list of the correct length
    """
    if isinstance(gridsize, int):
        # Broadcast a single size to every parameter.
        return [gridsize] * size
    if isinstance(gridsize, list):
        if len(gridsize) != size:
            raise ValueError(
                f"`gridsize` is a list of length {len(gridsize)} but " +
                f"`shape` determined by `input` is {size}")
        return gridsize
    raise TypeError("`gridsize` is not a list or an integer")
def get_levels(self, marginal, ranges, levels=[0.68, 0.95]):
    """ Used for approximately calculating confidence region isocontours

    The marginal distribution values are sorted in descending order and
    normalised to one; the value whose cumulative sum is closest to each
    desired level is returned as that level's isocontour value. If the
    distribution is significantly non-zero outside of the gridded range
    this estimate will be biased.

    Fix: the original computed a `domain_volume` from `ranges` that was
    never used anywhere — the dead computation is removed. The `ranges`
    parameter is kept for interface compatibility but is unused.

    Parameters
    ----------
    marginal : float(gridsize*n_param)
        The gridded marginal distribution to find the isocontour values of
    ranges : list
        List of the grid points for each parameter (unused, kept for
        backward compatibility)
    levels : list, default=[0.68, 0.95]
        The fraction describing the percentage of samples inside the
        isocontour (the default list is never mutated)

    Returns
    -------
    list:
        The values of the isocontours of the marginal distributions, in
        increasing value order (highest level first)
    """
    sorted_marginal = np.sort(marginal.flatten())[::-1]
    cdf = np.cumsum(sorted_marginal / sorted_marginal.sum())
    value = []
    # Walk from the widest level inwards; stop early if a tighter level
    # would not yield a strictly larger isocontour value.
    for level in levels[::-1]:
        this_value = sorted_marginal[np.argmin(np.abs(cdf - level))]
        if len(value) == 0:
            value.append(this_value)
        elif this_value <= value[-1]:
            break
        else:
            value.append(this_value)
    return value
def setup_plot(self, ax=None, ranges=None, axis_labels=None,
               figsize=(10, 10), format=False):
    """Builds corner plot

    Parameters
    ----------
    ax : list of matplotlib axes or None, default=None
        If ax is None a new figure and axes are created to make a corner
        plot otherwise a set of axes are formatted correctly for a corner
        plot. If existing ax is not correctly shaped there will be an error
    ranges : list, default=None
        The list of ranges to set the number of rows and columns (if this
        is None there will be an error)
    axis_labels : list of str or None, default=None
        A list of names for each parameter, no axis labels if None
    figsize : tuple, default=(10, 10)
        The size (in inches) to create a figure (if ax is None)
    format : bool, default=False
        If formatting is not needed (existing axes are returned untouched)

    Returns
    -------
    axes object:
        The formatted matplotlib axes
    """
    rows = len(ranges)
    columns = len(ranges)
    if ax is None:
        # Fresh square grid of subplots, tightly packed.
        fig, ax = plt.subplots(rows, columns, figsize=figsize)
        plt.subplots_adjust(wspace=0.1, hspace=0.1)
    elif not format:
        # Caller supplied already-formatted axes; nothing to do.
        return ax
    for column in range(columns):
        for row in range(rows):
            if column > row:
                # Upper triangle is unused in a corner plot.
                ax[row, column].axis("off")
            else:
                if row > 0:
                    # Off-diagonal rows share the parameter range on y.
                    ax[row, column].set_ylim(
                        ranges[row][0],
                        ranges[row][-1])
                    if (column == 0) and (axis_labels is not None):
                        ax[row, column].set_ylabel(axis_labels[row])
                    else:
                        ax[row, column].set_yticks([])
                if row < rows - 1:
                    # x tick labels only on the bottom row.
                    ax[row, column].set_xticks([])
                if column > 0:
                    ax[row, column].set_yticks([])
                if column < columns - 1:
                    ax[row, column].set_xlim(
                        ranges[column][0],
                        ranges[column][-1])
                    if (row == rows - 1) and (axis_labels is not None):
                        ax[row, column].set_xlabel(axis_labels[column])
                    else:
                        ax[row, column].set_xticks([])
    return ax
def _scatter_plot(self, ax=None, ranges=None, points=None, label=None,
axis_labels=None, colours=None, hist=True, s=5.,
alpha=1., figsize=(10, 10), linestyle="solid",
target=None, format=False, ncol=2,
bbox_to_anchor=(0.0, 1.0)):
"""Plotter for scatter plots
Plots scatter plots for points (parameters or summaries) in 2D subplots
and the histogram of points in the 1D diagonal subplots.
Parameters
----------
ax : list of matplotlib axes or None, default=None
If ax is None a new figure and axes are created to make a corner
plot otherwise a set of axes are formatted correctly for a corner
plot. If existing ax is not correctly shaped there will be an error
ranges : list, default=None
The list of ranges to set the number of rows and columns (if this
is None the ranges set on initialisation will be used)
points : float(n_targets, n_points, {n_params} or {n_summaries})
The points to scatter plot
label : str or None, default=None
Name to be used in the legend
axis_labels : list of str or None, default=None
A list of names for each parameter, no axis labels if None
colours : str or list or None, default=None
The colour or list of colours to use for the different targets, if
None then the normal matplotlib colours are used
hist : bool, default=True
Whether or not to plot histograms on the diagonal of the corner
plot
s : float, default=5.
The size of the marker points in the scatter plot
alpha : float, default=1.
The amount of alpha colour for the marker points
figsize : tuple, default=(10, 10)
The size (in inches) to create a figure (if ax is None)
linestyle : str, default="solid"
Linestyle for the histograms
target : None or int or list, default=None
The indices to choose which target's points are plotted
format : bool, default=False
If formatting is not needed
ncols : int, default=2
Number of columns for the legend
bbox_to_anchor : tuple, default=(0.0, 1.0)
Position to fix the legend to
Returns
-------
axes object:
The scatter plot axes
Raises
------
ValueError
if colour input is not correct
Todo
----
There are many extra matplotlib parameters which could be passed,
although this is not done because the axis is returned which can then
be manipulated.
"""
if ranges is None:
raise ValueError("`ranges` must be provided")
if points is None:
raise ValueError("`points` to scatter must be provided")
targets, n_targets = self.target_choice(target)
if colours is None:
colours = [f"C{i}" for i in range(n_targets)]
elif isinstance(colours, str):
colours = [colours for i in range(n_targets)]
elif isinstance(colours, list):
pass
else:
raise ValueError(
"`colours` must be None, a color as a | |
# <gh_stars>1-10
from abaqusConstants import *
class RemeshingRule:
"""The RemeshingRule object controls the adaptive remeshing resizing and the error
indicators written to the output database for a specified region of the model.
Attributes
----------
suppressed: Boolean
A Boolean specifying whether the remeshing rule is suppressed. Remeshing of the
remeshing rule's region will not occur if you suppress a rule. The default value is OFF.
Notes
-----
This object can be accessed by:
.. code-block:: python
import mesh
mdb.models[name].remeshingRules[name]
"""
# A Boolean specifying whether the remeshing rule is suppressed. Remeshing of the
# remeshing rule's region will not occur if you suppress a rule. The default value is OFF.
suppressed: Boolean = OFF
def __init__(self, name: str, stepName: str, variables: tuple, description: str = '',
             region: SymbolicConstant = MODEL, sizingMethod: SymbolicConstant = DEFAULT,
             errorTarget: float = 0, maxSolutionErrorTarget: float = 0,
             minSolutionErrorTarget: float = 0, meshBias: int = 0, minElementSize: float = 0,
             maxElementSize: float = 0, outputFrequency: SymbolicConstant = LAST_INCREMENT,
             specifyMinSize: Boolean = OFF, specifyMaxSize: Boolean = ON,
             coarseningFactor: SymbolicConstant = DEFAULT_LIMIT,
             refinementFactor: SymbolicConstant = DEFAULT_LIMIT,
             elementCountLimit: "int | None" = None):
    """This method creates a RemeshingRule object.

    This class is an interface stub mirroring the Abaqus scripting API;
    the body is intentionally empty.

    Notes
    -----
    This function can be accessed by:

    .. code-block:: python

        mdb.models[name].RemeshingRule

    Parameters
    ----------
    name
        A String specifying the name of the object.
    stepName
        A String specifying the name of the step in which resizing should occur for this rule.
    variables
        A sequence of Strings specifying the output request variables that Abaqus will use as
        error indicators.
    description
        A String specifying a descriptive string for this rule. The default value is an empty
        string.
    region
        The SymbolicConstant MODEL or a Region object specifying the region in which Abaqus will
        remesh and generate output. The SymbolicConstant MODEL represents the entire applicable
        model. The default value is MODEL.
    sizingMethod
        A SymbolicConstant specifying the method for calculating the new mesh sizes. The
        SymbolicConstant DEFAULT indicates that Abaqus will use the default calculation method
        for each individual variable. Possible values are DEFAULT, UNIFORM_ERROR, and
        MINIMUM_MAXIMUM. The default value is DEFAULT.
    errorTarget
        A Float specifying the target error percentage for each variable in the region. A value
        of 0.0 indicates that Abaqus will use automated error target reduction for the region.
        You use the *errorTarget* argument when *sizingMethod*=UNIFORM_ERROR. The default value
        is 0.0.
    maxSolutionErrorTarget
        A Float specifying the target error percentage at the location of the maximum solution
        value in the region. A value of 0.0 indicates that Abaqus will use automated error
        target reduction for the region. You use the *maxSolutionErrorTarget* argument when
        *sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
    minSolutionErrorTarget
        A Float specifying the target error percentage at the location of the minimum solution
        value in the region. A value of 0.0 indicates that Abaqus will use automated error
        target reduction for the region. You use the *minSolutionErrorTarget* argument when
        *sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
    meshBias
        An Int specifying an indication of how much Abaqus will bias the mesh toward the
        location of the maximum solution value in the region. The higher the value, the more the
        mesh will bias towards the location of the maximum solution value. You use the
        *meshBias* argument when *sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
    minElementSize
        A Float specifying the minimum size of any single element. The default value is 0.0.
    maxElementSize
        A Float specifying the maximum size of any single element. The default value is 0.0.
    outputFrequency
        A SymbolicConstant specifying the frequency with which the error indicators are saved to
        the output database file (.odb). Possible values are LAST_INCREMENT and ALL_INCREMENTS.
        The default value is LAST_INCREMENT.
    specifyMinSize
        A Boolean specifying an indication of whether to use a user-supplied minimum element
        size or to calculate a characteristic minimum element size. The default value is OFF.
    specifyMaxSize
        A Boolean specifying an indication of whether to use a user-supplied maximum element
        size or to calculate a characteristic maximum element size. The default value is ON.
    coarseningFactor
        A SymbolicConstant or an Int specifying an indication of the upper limit on the element
        growth from one remeshing iteration to the next. Possible values are DEFAULT_LIMIT and
        NOT_ALLOWED. The default value is DEFAULT_LIMIT.
    refinementFactor
        A SymbolicConstant or an Int specifying an indication of the upper limit on element
        shrinkage from one remeshing iteration to the next. Possible values are DEFAULT_LIMIT
        and NOT_ALLOWED. The default value is DEFAULT_LIMIT.
    elementCountLimit
        None or an Int specifying an approximate limit on the number of elements that will be
        created during remeshing. Use None to indicate there is no upper limit. The default
        value is None.

    Returns
    -------
    A RemeshingRule object.

    Raises
    ------
    AbaqusException.
    """
    pass
def resume(self):
    """Re-enable a remeshing rule that was previously suppressed."""
def suppress(self):
    """Disable this remeshing rule.

    Abaqus does not remesh regions whose rules are suppressed.
    """
def setValues(self, description: str = '', region: SymbolicConstant = MODEL,
sizingMethod: SymbolicConstant = DEFAULT, errorTarget: float = 0,
maxSolutionErrorTarget: float = 0, minSolutionErrorTarget: float = 0, meshBias: int = 0,
minElementSize: float = 0, maxElementSize: float = 0,
outputFrequency: SymbolicConstant = LAST_INCREMENT, specifyMinSize: Boolean = OFF,
specifyMaxSize: Boolean = ON, coarseningFactor: SymbolicConstant = DEFAULT_LIMIT,
refinementFactor: SymbolicConstant = DEFAULT_LIMIT, elementCountLimit: int = None):
"""This method modifies the RemeshingRule object.
Parameters
----------
description
A String specifying a descriptive string for this rule. The default value is an empty
string.
region
The SymbolicConstant MODEL or a Region object specifying the region in which Abaqus will
remesh and generate output. The SymbolicConstant MODEL represents the entire applicable
model. The default value is MODEL.
sizingMethod
A SymbolicConstant specifying the method for calculating the new mesh sizes. The
SymbolicConstant DEFAULT indicates that Abaqus will use the default calculation method
for each individual variable. Possible values are DEFAULT, UNIFORM_ERROR, and
MINIMUM_MAXIMUM. The default value is DEFAULT.
errorTarget
A Float specifying the target error percentage for each variable in the region. A value
of 0.0 indicates that Abaqus will use automated error target reduction for the region.
You use the *errorTarget* argument when *sizingMethod*=UNIFORM_ERROR. The default value
is 0.0.
maxSolutionErrorTarget
A Float specifying the target error percentage at the location of the maximum solution
value in the region. A value of 0.0 indicates that Abaqus will use automated error
target reduction for the region. You use the *maxSolutionErrorTarget* argument when
*sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
minSolutionErrorTarget
A Float specifying the target error percentage at the location of the minimum solution
value in the region. A value of 0.0 indicates that Abaqus will use automated error
target reduction for the region. You use the *minSolutionErrorTarget* argument when
*sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
meshBias
An Int specifying an indication of how much Abaqus will bias the mesh toward the
location of the maximum solution value in the region. The higher the value, the more the
mesh will bias towards the location of the maximum solution value. You use the
*meshBias* argument when *sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
minElementSize
A Float specifying the minimum size of any single element. The default value is 0.0.
maxElementSize
A Float specifying the maximum size of any single element. The default value is 0.0.
outputFrequency
A SymbolicConstant specifying the frequency with which the error indicators are saved to
the output database file (.odb). Possible values are LAST_INCREMENT and ALL_INCREMENTS.
The default value is | |
import unittest
import json
import copy
import os
from anchore_engine.services.policy_engine.engine.policy.gate import ExecutionContext
from anchore_engine.services.policy_engine.engine.policy.bundles import build_bundle, GateAction
from anchore_engine.db import get_thread_scoped_session as get_session, Image
from anchore_engine.services.policy_engine.engine.tasks import ImageLoadTask
from test.services.policy_engine.utils import init_db, LocalTestDataEnvironment
from anchore_engine.services.policy_engine.engine.policy.exceptions import InitializationError, UnsupportedVersionError, BundleTargetTagMismatchError
class TestPolicyBundleEval(unittest.TestCase):
invalid_empty_bundle = {}
valid_empty_bundle = {
'id': 'someid',
'version': '1_0',
'name': 'empty_bundle'
}
test_env = LocalTestDataEnvironment(os.environ['ANCHORE_ENGINE_TEST_HOME'])
# test_image_ids = {
# 'busybox': 'c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113798',
# 'node': '6c792d9195914c8038f4cabd9356a5af47ead140b87682c8651edd55b010686c',
# 'centos': '8140d0c64310d4e290bf3938757837dbb8f806acba0cb3f6a852558074345348',
# 'ruby': 'f5cfccf111795cc67c1736df6ad1832afbd4842533b5a897d91e8d6122963657',
# 'alpine': '02674b9cb179d57c68b526733adf38b458bd31ba0abff0c2bf5ceca5bad72cd9',
# 'debian8': '4594f2fd77bf7ae4ad2b284a60e4eebb1a73b0859fe611b94f4245a6872d803e',
# 'debian9': '3e83c23dba6a16cd936a3dc044df71b26706c5a4c28181bc3ca4a4af9f5f38ee',
# 'fedora': '15895ef0b3b2b4e61bf03d38f82b42011ff7f226c681705a4022ae3d1d643888',
# 'ubuntu:vivid-2015': '83fddfee12bbfa5f36494fbadd7d177dbf5c1b664461de1e6557ead030db13fb',
# }
default_bundle = {}
@classmethod
def setUpClass(cls):
    """Bootstrap the test database and cache the policy bundles under test."""
    env = cls.test_env
    init_db(env.mk_db(), do_bootstrap=True)
    cls.default_bundle = env.get_bundle('default')
    cls.old_bundle = env.get_bundle('old_default')
def load_images(self):
    """Load every exported test image into the policy engine.

    Fix: the original made an extra call to
    ``self.test_env.image_exports()`` and discarded the result before
    iterating over a second call; the redundant call is removed.
    """
    for img_id, path in self.test_env.image_exports():
        t = ImageLoadTask(user_id='0', image_id=img_id)
        t.fetch_url = 'file://' + path
        t.execute()
def get_image_named(self, db, name):
    """Fetch the Image DB record for a named test image.

    If the record is missing, load the exported test images and retry once.
    """
    img_obj = db.query(Image).get((self.test_env.get_images_named(name)[0][0], '0'))
    if img_obj:
        return img_obj
    self.load_images()
    return db.query(Image).get((self.test_env.get_images_named(name)[0][0], '0'))
def testBasicEvaluation(self):
    """Evaluate the multitest bundle, then verify the multi-policy default
    bundle and the plain default bundle produce identical evaluations.

    Fix: the init_errors assertions after building ``multi_default`` and
    ``default_built`` re-checked the already-verified ``built`` object
    (copy-paste bug); they now check the bundle they were meant to guard.
    """
    db = get_session()
    print('Building executable bundle from default bundle')
    test_tag = 'docker.io/library/ruby:latest'
    test_bundle = self.test_env.get_bundle('multitest')
    built = build_bundle(test_bundle, for_tag=test_tag)
    self.assertFalse(built.init_errors)
    print('Got: {}'.format(built))
    img_obj = self.get_image_named(db, 'ruby')
    self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
    evaluation = built.execute(img_obj, tag=test_tag,
                               context=ExecutionContext(db_session=db, configuration={}))
    self.assertIsNotNone(evaluation, 'Got None eval')
    print('Native json: {}\n'.format(json.dumps(evaluation.json(), indent=2)))
    print('Table json: {}\n'.format(json.dumps(evaluation.as_table_json(), indent=2)))
    # Diff old an new defaults
    multi_bundle = self.test_env.get_bundle('multi_default')
    multi_default = build_bundle(multi_bundle, for_tag=test_tag)
    self.assertFalse(multi_default.init_errors)
    print('Got: {}'.format(multi_default))
    self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
    multi_default_evaluation = multi_default.execute(img_obj, tag=test_tag,
                                                     context=ExecutionContext(db_session=db, configuration={}))
    default_built = build_bundle(self.default_bundle, for_tag=test_tag)
    self.assertFalse(default_built.init_errors)
    print('Got: {}'.format(default_built))
    self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
    default_evaluation = default_built.execute(img_obj, tag=test_tag,
                                               context=ExecutionContext(db_session=db, configuration={}))
    self.assertDictEqual(multi_default_evaluation.as_table_json(), default_evaluation.as_table_json())
def testBasicLegacyEvaluation(self):
    """Evaluate both the current and the legacy default bundles against the
    ruby image; each must build without init errors and yield an evaluation.
    """
    db = get_session()
    bundle_cases = (
        ('Building executable bundle from default bundle', self.default_bundle),
        ('Building executable bundle from old default bundle', self.old_bundle),
    )
    for intro_message, bundle in bundle_cases:
        print(intro_message)
        test_tag = 'docker.io/library/ruby:latest'
        built = build_bundle(bundle, for_tag=test_tag)
        self.assertFalse(built.init_errors)
        print('Got: {}'.format(built))
        img_obj = self.get_image_named(db, 'ruby')
        self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation, 'Got None eval')
        print(json.dumps(evaluation.json(), indent=2))
        print(json.dumps(evaluation.as_table_json(), indent=2))
def testDuplicateRuleEvaluation(self):
    """Evaluate a bundle whose policy repeats the same gate/trigger with
    conflicting and parameterized rules, to exercise duplicate-rule handling.

    The inline bundle (runtime data — kept verbatim) contains:
    - the same FROMSCRATCH trigger twice with conflicting GO/STOP actions,
    - three DIRECTIVECHECK rules with different parameter sets,
    and a catch-all mapping to that single policy.
    """
    print('Building executable bundle from default bundle')
    test_tag = 'docker.io/library/ruby:latest'
    multi_gate_bundle = {
        'id': 'multigate1',
        'name': 'Multigate test1',
        'version': '1_0',
        'policies': [
            {
                'id': 'policy1',
                'name': 'Test policy1',
                'version': '1_0',
                'rules': [
                    {
                        'gate': 'DOCKERFILECHECK',
                        'trigger': 'FROMSCRATCH',
                        'params': [],
                        'action': 'GO'
                    },
                    {
                        'gate': 'DOCKERFILECHECK',
                        'trigger': 'FROMSCRATCH',
                        'params': [],
                        'action': 'STOP'
                    },
                    {
                        'action': 'stop',
                        'gate': 'DOCKERFILECHECK',
                        'trigger': 'DIRECTIVECHECK',
                        'params': [
                            {
                                'name': 'DIRECTIVES',
                                'value': 'RUN'
                            },
                            {
                                'name': 'CHECK',
                                'value': 'exists'
                            }
                        ]
                    },
                    {
                        'action': 'STOP',
                        'gate': 'DOCKERFILECHECK',
                        'trigger': 'DIRECTIVECHECK',
                        'params': [
                            {
                                'name': 'DIRECTIVES',
                                'value': 'USER'
                            },
                            {
                                'name': 'CHECK',
                                'value': 'not_exists'
                            }
                        ]
                    },
                    {
                        'action': 'STOP',
                        'gate': 'DOCKERFILECHECK',
                        'trigger': 'DIRECTIVECHECK',
                        'params': [
                            {
                                'name': 'DIRECTIVES',
                                'value': 'RUN'
                            },
                            {
                                'name': 'CHECK',
                                'value': '=',
                                'check_value': 'yum update -y'
                            }
                        ]
                    }
                ]
            }
        ],
        'whitelists': [],
        'mappings': [
            {
                'registry': '*', 'repository': '*', 'image': {'type': 'tag', 'value': '*'}, 'policy_id': 'policy1', 'whitelist_ids': []
            }
        ]
    }
    built = build_bundle(multi_gate_bundle, for_tag=test_tag)
    self.assertFalse(built.init_errors)
    print('Got: {}'.format(built))
    db = get_session()
    img_obj = self.get_image_named(db, 'ruby')
    self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
    evaluation = built.execute(img_obj, tag=test_tag,
                               context=ExecutionContext(db_session=db, configuration={}))
    self.assertIsNotNone(evaluation, 'Got None eval')
    print(json.dumps(evaluation.json(), indent=2))
    print(json.dumps(evaluation.as_table_json(), indent=2))
def test_image_whitelist(self):
    """Verify whitelisted_images overrides a STOP policy decision.

    The bundle's only policy always STOPs, while any ':latest' tag is
    whitelisted. An 'alpine' tag must therefore be stopped by policy,
    and a 'latest' tag must pass with the 'whitelisted' reason.
    """
    bundle = {
        'id': 'multigate1',
        'name': '<NAME>',
        'version': '1_0',
        'policies': [
            {
                'id': 'policy1',
                'name': 'Test policy1',
                'version': '1_0',
                'rules': [
                    {
                        'gate': 'always',
                        'trigger': 'always',
                        'params': [],
                        'action': 'STOP'
                    }
                ]
            }
        ],
        'whitelists': [],
        'mappings': [
            {
                'registry': '*', 'repository': '*', 'image': {'type': 'tag', 'value': '*'}, 'policy_id': 'policy1', 'whitelist_ids': []
            }
        ],
        'whitelisted_images': [
            {
                'registry': '*',
                'repository': '*',
                'image': {
                    'type': 'tag',
                    'value': 'latest'
                }
            }
        ],
        'blacklisted_images': []
    }
    db = get_session()
    img_obj = self.get_image_named(db, 'ruby')
    self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
    # Non-whitelisted tag: the always-STOP policy decides.
    test_tag = 'docker.io/library/ruby:alpine'
    built = build_bundle(bundle, for_tag=test_tag)
    evaluation = built.execute(img_obj, tag=test_tag,
                               context=ExecutionContext(db_session=db, configuration={}))
    self.assertIsNotNone(evaluation)
    self.assertEqual(GateAction.stop, evaluation.bundle_decision.final_decision)
    self.assertEqual('policy_evaluation', evaluation.bundle_decision.reason)
    self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
    # Whitelisted tag: the image whitelist overrides the STOP policy.
    test_tag = 'docker.io/library/ruby:latest'
    built = build_bundle(bundle, for_tag=test_tag)
    evaluation = built.execute(img_obj, tag=test_tag,
                               context=ExecutionContext(db_session=db, configuration={}))
    self.assertIsNotNone(evaluation)
    self.assertEqual(GateAction.go, evaluation.bundle_decision.final_decision)
    self.assertEqual('whitelisted', evaluation.bundle_decision.reason)
    def test_image_blacklist(self):
        """Blacklisted images must evaluate to STOP with reason 'blacklisted', even without a matching policy."""
        # Bundle with an always-STOP policy plus a blacklist entry for 'latest' tags.
        bundle = {
            'id': 'multigate1',
            'name': '<NAME>',
            'version': '1_0',
            'policies': [
                {
                    'id': 'policy1',
                    'name': 'Test policy1',
                    'version': '1_0',
                    'rules': [
                        {
                            'gate': 'always',
                            'trigger': 'always',
                            'params': [],
                            'action': 'STOP'
                        }
                    ]
                }
            ],
            'whitelists': [],
            'mappings': [
                {
                    'registry': '*', 'repository': '*', 'image': {'type': 'tag', 'value': '*'}, 'policy_id': 'policy1', 'whitelist_ids': []
                }
            ],
            'blacklisted_images': [
                {
                    'registry': '*',
                    'repository': '*',
                    'image': {
                        'type': 'tag',
                        'value': 'latest'
                    }
                }
            ],
            'whitelisted_images': []
        }
        db = get_session()
        img_obj = self.get_image_named(db, 'ruby')
        self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
        # Non-blacklisted tag: the STOP comes from the policy evaluation itself.
        test_tag = 'docker.io/library/ruby:alpine'
        built = build_bundle(bundle, for_tag=test_tag)
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation)
        self.assertEqual(GateAction.stop, evaluation.bundle_decision.final_decision)
        self.assertEqual('policy_evaluation', evaluation.bundle_decision.reason)
        self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
        # Blacklisted tag: same final decision (STOP) but the reason must be the blacklist.
        test_tag = 'docker.io/library/ruby:latest'
        built = build_bundle(bundle, for_tag=test_tag)
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation)
        self.assertEqual(GateAction.stop, evaluation.bundle_decision.final_decision)
        self.assertEqual('blacklisted', evaluation.bundle_decision.reason)
        # A blacklist match must apply even when the bundle has no policies or mappings at all.
        bundle = {
            'id': 'emptytest1',
            'name': 'Empty mapping test1',
            'version': '1_0',
            'policies': [],
            'whitelists': [],
            'mappings': [],
            'blacklisted_images': [
                {
                    'registry': '*',
                    'repository': '*',
                    'image': {
                        'type': 'tag',
                        'value': '*'
                    }
                }
            ],
            'whitelisted_images': []
        }
        built = build_bundle(bundle, for_tag=test_tag)
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation)
        self.assertEqual(GateAction.stop, evaluation.bundle_decision.final_decision)
        self.assertEqual('blacklisted', evaluation.bundle_decision.reason)
        # Symmetric check: a whitelist match on an otherwise empty bundle must yield GO.
        bundle = {
            'id': 'emptytest1',
            'name': 'Empty mapping test1',
            'version': '1_0',
            'policies': [],
            'whitelists': [],
            'mappings': [],
            'whitelisted_images': [
                {
                    'registry': '*',
                    'repository': '*',
                    'image': {
                        'type': 'tag',
                        'value': '*'
                    }
                }
            ],
            'blacklisted_images': []
        }
        built = build_bundle(bundle, for_tag=test_tag)
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation)
        self.assertEqual(GateAction.go, evaluation.bundle_decision.final_decision)
        self.assertEqual('whitelisted', evaluation.bundle_decision.reason)
    def testWhitelists(self):
        """Whitelisting a previously-firing trigger must remove (or mask) its decision on re-evaluation."""
        print('Building executable bundle from default bundle')
        test_tag = 'docker.io/library/ruby:latest'
        built = build_bundle(self.default_bundle, for_tag=test_tag)
        self.assertFalse(built.init_errors)
        print('Got: {}'.format(built))
        db = get_session()
        img_obj = self.get_image_named(db, 'ruby')
        self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
        # First evaluation with the unmodified default bundle to collect decisions.
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation, 'Got None eval')
        print(json.dumps(evaluation.json(), indent=2))
        print(json.dumps(evaluation.as_table_json(), indent=2))
        # Take the first fired decision and build a whitelist entry targeting
        # exactly its gate and trigger id.
        to_whitelist = evaluation.bundle_decision.policy_decisions[0].decisions[0]
        whitelist_bundle = copy.deepcopy(self.default_bundle)
        whitelist_bundle['whitelists'].append({
            'id': 'generated_whitelist1',
            'name': 'test_whitelist',
            'version': '1_0',
            'items': [
                {
                    'gate': to_whitelist.match.trigger.gate_cls.__gate_name__,
                    'trigger_id': to_whitelist.match.id,
                    'id': 'test_whitelistitem'
                }
            ]
        })
        # Attach the generated whitelist to the first mapping so it applies to this tag.
        whitelist_bundle['mappings'][0]['whitelist_ids'] = ['generated_whitelist1']
        built = build_bundle(whitelist_bundle, for_tag=test_tag)
        print('Got updated: {}'.format(built))
        img_obj = self.get_image_named(db, 'ruby')
        self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
        # Second evaluation: the whitelisted trigger must no longer appear as an
        # active (non-whitelisted) decision.
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation, 'Got None eval')
        #print(json.dumps(evaluation.json(), indent=2))
        #print(json.dumps(evaluation.as_table_json(), indent=2))
        # The lambda maps whitelisted decisions to None, so the whitelisted match id
        # must not show up among the remaining active decision ids.
        self.assertNotIn(to_whitelist.match.id, map(lambda x: x.match.id if not (hasattr(x.match, 'is_whitelisted') and x.match.is_whitelisted) else None, evaluation.bundle_decision.policy_decisions[0].decisions))
def testErrorEvaluation(self):
bundle = {
'id': 'someid',
'version': '1_0',
'whitelists': [],
'policies': [],
'mappings': []
}
print('Building executable bundle from default bundle')
test_tag = 'docker.io/library/ruby:latest'
built = build_bundle(bundle, for_tag=test_tag)
print('Got: {}'.format(built))
db = get_session()
img_obj = self.get_image_named(db, 'ruby')
self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
evaluation = built.execute(img_obj, tag=test_tag,
context=ExecutionContext(db_session=db, configuration={}))
self.assertIsNotNone(evaluation, 'Got None eval')
print('Result: {}'.format(json.dumps(evaluation.as_table_json(), indent=2)))
with self.assertRaises(BundleTargetTagMismatchError) as f:
evaluation = built.execute(img_obj, tag='docker.io/library/ubuntu:vivid-2015',
context=ExecutionContext(db_session=db, configuration={}))
    def testDeprecatedGateEvaluation(self):
        """Bundles using deprecated gates must fail to build unless allow_deprecated=True, then evaluate with warnings."""
        # Policy mixing deprecated gates (PKGDIFF, ANCHORESEC) with a current one ('always').
        bundle = {
            'id': 'someid',
            'version': '1_0',
            'whitelists': [],
            'policies': [
                {'id': 'abc',
                 'name': 'Deprecated Policy',
                 'version': '1_0',
                 'rules': [
                     {
                         'gate': 'PKGDIFF',
                         'trigger': 'pkgadd',
                         'params': [],
                         'action': 'stop'
                     },
                     {
                         'gate': 'always',
                         'trigger': 'always',
                         'action': 'go',
                         'params': []
                     },
                     {
                         'gate': 'ANCHORESEC',
                         'trigger': 'VULNLOW',
                         'action': 'warn',
                         'params': []
                     }
                 ]
                 }
            ],
            'mappings': [
                {'registry': '*', 'repository': '*', 'image': {'type': 'tag', 'value': '*'}, 'name': 'Default', 'policy_id': 'abc', 'whitelist_ids': []}
            ]
        }
        print('Building executable bundle from default bundle')
        test_tag = 'docker.io/library/ruby:latest'
        db = get_session()
        # With allow_deprecated=False the build must raise; the statements after
        # build_bundle inside this block are never reached.
        with self.assertRaises(InitializationError) as ex:
            built = build_bundle(bundle, for_tag=test_tag, allow_deprecated=False)
            print('Got: {}'.format(built))
            img_obj = self.get_image_named(db, 'ruby')
            self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
            evaluation = built.execute(img_obj, tag=test_tag,
                                       context=ExecutionContext(db_session=db, configuration={}))
        # With allow_deprecated=True the same bundle builds and evaluates,
        # but the evaluation must carry warnings about the deprecated gates.
        built = build_bundle(bundle, for_tag=test_tag, allow_deprecated=True)
        print('Got: {}'.format(built))
        img_obj = self.get_image_named(db, 'ruby')
        self.assertIsNotNone(img_obj, 'Failed to get an image object to test')
        evaluation = built.execute(img_obj, tag=test_tag,
                                   context=ExecutionContext(db_session=db, configuration={}))
        self.assertIsNotNone(evaluation, 'Got None eval')
        print('Result: {}'.format(json.dumps(evaluation.json(), indent=2)))
        self.assertIsNotNone(evaluation.warnings)
def testPolicyInitError(self):
db = get_session()
img_obj = self.get_image_named(db, 'ruby')
ruby_tag = 'dockerhub/library/ruby:latest'
with self.assertRaises(UnsupportedVersionError) as f:
built = build_bundle({
'id':'someid',
'version': 'invalid_version',
'name': 'invalid_version',
'whitelists': [],
'policies': [],
'mappings': []
})
built.execute(image_object=img_obj, context=None, tag=ruby_tag)
with self.assertRaises(InitializationError) as f:
built = build_bundle({
'id':'someid',
'version': | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Algorithm presented here are described in:
# Blind Denoising with Random Greedy Pursuits.
# <NAME>., <NAME>., <NAME>., & <NAME>. (2014).
# IEEE Signal Processing Letters, 21(11), 1341-1345
#
# License: BSD (3-clause)
from math import sqrt
import multiprocessing
from functools import partial
import numpy as np
from scipy.special import erfinv
from scipy import linalg
from joblib import Parallel, delayed, Memory
from mdct_tools import mdct_waveform, mdct, MDCT
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance

    If seed is None, return the RandomState singleton used by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        rng = np.random.mtrand._rand
    elif isinstance(seed, (int, np.integer)):
        rng = np.random.RandomState(seed)
    elif isinstance(seed, np.random.RandomState):
        rng = seed
    else:
        raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                         ' instance' % seed)
    return rng
def _single_mp_run(x, Phi, bound, max_iter, verbose=False, pad=0,
                   random_state=None, memory=Memory(None)):
    """Single run of the RSSMP algorithm.

    Parameters
    ----------
    x : array, shape (n_times,)
        The single-channel signal to decompose.
    Phi : MDCT
        The multi-scale MDCT dictionary.
    bound : float
        Lambda bound under which the decomposition stops (noise zone reached).
    max_iter : int
        Maximum number of iterations (alternate stopping criterion).
    verbose : bool
        If True, print per-iteration progress.
    pad : int
        Number of zeros prepended and appended to x before decomposition.
    random_state : None | int | np.random.RandomState
        Controls the random shift sequence.
    memory : instance of Memory
        joblib cache used for the MDCT waveforms.

    Returns
    -------
    x_est : array, shape (n_times + 2 * pad,)
        Estimated (denoised) signal, still padded.
    err_mse : list of float
        Residual norm after each iteration (index 0 is the initial norm).
    """
    rng = check_random_state(random_state)
    pad = int(pad)
    x = np.concatenate((np.zeros(pad), x, np.zeros(pad)))
    n = x.size
    m = Phi.doth(x).size
    err_mse = []
    # Initialisation. The residual carries extra zeros at the right border so
    # that atoms selected near the edge can be subtracted without going out of
    # bounds. Integer division (//) is required here: under Python 3 a float
    # size would raise TypeError in np.zeros.
    residual = np.concatenate((x.copy(), np.zeros(max(Phi.sizes) // 2)))
    s = np.zeros(m)
    x_est = np.zeros(n)
    # The cached waveform factory is loop-invariant: build it once.
    mdct_wf = memory.cache(mdct_waveform)
    # Main algorithm
    coeffs = np.zeros(m)
    it_number = 0
    current_lambda = 1
    err_mse.append(linalg.norm(residual))
    # Decomposition loop: stopping criteria is either SNR or iteration number
    while (current_lambda > bound) & (it_number < max_iter):
        # pick a shift at random : in each size
        rndshifts = []
        for scale_idx, size in enumerate(Phi.sizes):
            # high must be an int under Python 3 (size // 4, not size / 4)
            shift = rng.randint(low=0, high=size // 4)
            coeffs[scale_idx * n:(scale_idx + 1) * n] = mdct(
                residual[shift:shift + n], size).ravel()
            rndshifts.append(shift)
        # Select a new element
        idx = np.argmax(np.abs(coeffs))
        # Update coefficients
        s[idx] += coeffs[idx]
        # Only one method now : local update via a cached waveform
        # find scale and frequency bin of selected atom
        scale_idx = idx // n
        size = Phi.sizes[scale_idx]
        F = n // (size // 2)
        frame = (idx - (scale_idx * n)) % F
        freq_bin = (idx - (scale_idx * n)) // F
        # Integer position: a float here would make the slices below raise
        # TypeError under Python 3.
        # NOTE(review): for frame == 0 and shift < size // 4, pos can go
        # negative; the leading zero padding is presumably meant to make such
        # atoms negligible -- confirm with callers that pad > 0.
        pos = frame * (size // 2) - size // 4 + rndshifts[scale_idx]
        residual[pos:pos + size] -= coeffs[idx] * mdct_wf(size, freq_bin)
        # also add it to the reconstruction
        x_est[pos:pos + size] += coeffs[idx] * mdct_wf(size, freq_bin)
        # error computation (err_mse)
        err_mse.append(linalg.norm(residual))
        current_lambda = np.sqrt(1 - err_mse[-1] / err_mse[-2])
        if current_lambda <= bound:
            # the last atom crossed into the noise zone: undo its contribution
            x_est[pos:pos + size] -= coeffs[idx] * mdct_wf(size, freq_bin)
        if verbose:
            print("Iteration %d : Current lambda of %1.4f" % (
                it_number, current_lambda))
        it_number += 1
    return x_est, err_mse
def _single_multichannel_mp_run(X, Phi, bound, selection_rule, stop_crit,
                                max_iter, verbose=False, pad=0,
                                random_state=None, memory=Memory(None)):
    """Single run of the structured (multichannel) variant of RSSMP.

    Parameters mirror `_single_mp_run`, except that X has shape
    (n_channels, n_samples) and `selection_rule` / `stop_crit` control how the
    per-channel projections and per-channel lambdas are combined.

    Returns
    -------
    X_est : array, shape (n_channels, n_samples)
        Estimated (denoised) signals with the zero padding removed.
    err_mse : dict
        Maps each channel index to the list of residual norms per iteration.
    """
    rng = check_random_state(random_state)
    # pad each channel with zeros on both sides (hstack along time axis)
    pad = int(pad)
    n_channels = X.shape[0]
    X = np.hstack((np.zeros((n_channels, pad)), X,
                   np.zeros((n_channels, pad))))
    n_samples = X.shape[1]
    n_projs = Phi.doth(X).shape[1]
    err_mse = {}
    # Initialisation. Extra zeros at the right border keep edge-atom updates in
    # bounds. Integer division (//) is required: a float dimension would raise
    # TypeError in np.zeros under Python 3.
    residual = np.hstack((X.copy(), np.zeros((n_channels,
                                              max(Phi.sizes) // 2))))
    s_rep = np.zeros((n_channels, n_projs))
    X_est = np.zeros((n_channels, n_samples))
    # The cached waveform factory is loop-invariant: build it once.
    mdct_wf = memory.cache(mdct_waveform)
    # Main algorithm
    coeffs = np.zeros((n_channels, n_projs))
    it_number = 0
    current_lambda = 1
    for c_idx in range(n_channels):
        err_mse[c_idx] = []
        err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))
    # Decomposition loop: stopping criteria is either SNR or iteration number
    while (current_lambda > bound) & (it_number < max_iter):
        # pick a shift at random : in each size
        rndshifts = {}
        for c_idx in range(n_channels):
            rndshifts[c_idx] = []
        for s_idx, L in enumerate(Phi.sizes):
            # high must be an int under Python 3 (L // 4, not L / 4)
            shift = rng.randint(low=0, high=L // 4)
            for c_idx in range(n_channels):
                coeffs[c_idx, s_idx * n_samples:(s_idx + 1) * n_samples] = \
                    mdct(residual[c_idx, shift:shift + n_samples], L).ravel()
                rndshifts[c_idx].append(shift)
        # Multichannel mode : we combine projections across channels.
        # NOTE(review): selection_rule is expected to reduce over the channel
        # axis (axis 0); np.sum with no axis argument collapses to a scalar --
        # confirm callers pass an axis-aware rule.
        combined = selection_rule(coeffs ** 2)
        # Select a new element
        idx = np.argmax(np.abs(combined))
        # find scale and frequency bin of selected atom
        s_idx = idx // n_samples
        L = Phi.sizes[s_idx]
        F = n_samples // (L // 2)
        frame = (idx - (s_idx * n_samples)) % F
        freq_bin = (idx - (s_idx * n_samples)) // F
        # Update coefficients and residual
        current_lambda_array = np.zeros(n_channels)
        for c_idx in range(n_channels):
            s_rep[c_idx, idx] += coeffs[c_idx, idx]
            # Integer position: a float would make the slices below raise
            # TypeError under Python 3.
            pos = frame * (L // 2) - L // 4 + rndshifts[c_idx][s_idx]
            residual[c_idx, pos:pos + L] -= coeffs[c_idx, idx] * \
                mdct_wf(L, freq_bin)
            # also add it to the reconstruction
            X_est[c_idx, pos:pos + L] += coeffs[c_idx, idx] * \
                mdct_wf(L, freq_bin)
            # error computation (err_mse)
            err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))
            current_lambda_array[c_idx] = np.sqrt(
                1. - err_mse[c_idx][-1] / err_mse[c_idx][-2])
        current_lambda = stop_crit(current_lambda_array)
        if verbose:
            print("Iteration %d : Current lambda of %1.4f" % (
                it_number, current_lambda))
        it_number += 1
    # Strip the zero padding. Use an explicit end index: the original
    # X_est[:, pad:-pad] returned an EMPTY array when pad == 0.
    end = X_est.shape[1] - pad
    return X_est[:, pad:end], err_mse
def _pad(X):
""" add zeroes on the border to make sure the signal length is a
power of two """
p_above = int(np.floor(np.log2(X.shape[1])))
M = 2 ** (p_above + 1) - X.shape[1]
X = np.hstack((np.zeros((X.shape[0], M)), X))
return X, M
def _denoise(seeds, x, dico, sup_bound, n_atoms, verbose=False, indep=True,
             stop_crit=None, selection_rule=None, pad=0,
             memory=Memory(None)):
    """Run one RSSMP decomposition per seed and collect the estimates.

    Dispatches to the plain (per-channel) or structured (multichannel)
    variant depending on `indep`, discarding the per-run error traces.
    """
    estimates = []
    for seed in seeds:
        if verbose > 0:
            print("Run seed %d" % seed)
        if indep:
            estimate, _ = _single_mp_run(x, dico, sup_bound, n_atoms,
                                         verbose=verbose, pad=pad,
                                         random_state=seed,
                                         memory=memory)
        else:
            estimate, _ = _single_multichannel_mp_run(x, dico, sup_bound,
                                                      selection_rule,
                                                      stop_crit,
                                                      n_atoms,
                                                      verbose=verbose,
                                                      pad=pad,
                                                      random_state=seed,
                                                      memory=memory)
        estimates.append(estimate)
    return estimates
def _bird_core(X, scales, n_runs, Lambda_W, max_iter=100,
               stop_crit=np.mean,
               selection_rule=np.sum,
               n_jobs=1, indep=True,
               random_state=None, memory=Memory(None), verbose=False):
    """Automatically detect when noise zone has been reached and stop
    MP at this point
    Parameters
    ----------
    X : array, shape (n_channels, n_times)
        The numpy n_channels-vy-N array to be denoised where n_channels is
        number of sensors and N the dimension
    scales : list
        The list of MDCT scales that will be used to built the
        dictionary Phi
    n_runs : int
        the number of runs (n_runs in the paper)
    Lambda_W : float
        bound for lambda under which a run will be stopped
    max_iter : int
        Maximum number of iterations (serves as alternate stopping criterion)
    stop_crit : function
        controls the calculation of Lambda
    selection_rule : callable
        controls the way multiple channel projections are combined for atom
        selection only used if indep=False
    n_jobs : int
        number of jobs to run in parallel
    indep : bool
        True for BIRD (independent processing of each channel,
        False for S-BIRD (structured sparsity seeked)
    random_state : None | int | np.random.RandomState
        To specify the random generator state (seed).
    memory : instance of Memory
        The object to use to cache some computations. If cachedir is None, no
        caching is performed.
    verbose : bool
        verbose mode
    Returns
    -------
    X_denoise : array, shape (n_channels, n_times)
        denoised array of same shape as X
    """
    Phi = MDCT(scales)
    # border margin so atoms near the edges can be handled safely
    pad = int(1.5 * max(scales))
    X_denoise = np.zeros_like(X)
    approx = []
    rng = check_random_state(random_state)
    # one independent seed per run so parallel workers draw distinct shifts
    seeds = rng.randint(4294967295, size=n_runs)  # < max seed value
    if n_jobs <= 0:
        # joblib-style convention: n_jobs = -1 means "all cores",
        # -2 means "all but one", etc.
        n_cores = multiprocessing.cpu_count()
        n_jobs = min(n_cores + n_jobs + 1, n_cores)
    if indep:
        # Independent treat of each channel (plain BIRD)
        for r, x in zip(X_denoise, X):
            # split the seeds across workers; each worker runs a batch of runs
            this_approx = Parallel(n_jobs=n_jobs)(
                delayed(_denoise)(this_seeds, x, Phi, Lambda_W,
                                  max_iter, pad=pad, verbose=verbose,
                                  indep=True, memory=memory)
                for this_seeds in
                np.array_split(seeds, n_jobs))
            # flatten the per-worker lists of estimates into a single list
            this_approx = sum(this_approx[1:], this_approx[0])
            # accumulate the unpadded estimates; averaging (division by
            # n_runs) happens once at the end of the function
            r[:] = sum([a[pad:-pad] for a in this_approx])
            approx.append(this_approx)
    else:
        # data need to be processed jointly
        this_approx = Parallel(n_jobs=n_jobs)(
            delayed(_denoise)(this_seeds, X, Phi, Lambda_W,
                              max_iter, pad=pad, verbose=verbose,
                              selection_rule=selection_rule,
                              indep=False, memory=memory,
                              stop_crit=stop_crit)
            for this_seeds in
            np.array_split(seeds, n_jobs))
        # reconstruction by averaging
        # (the multichannel runs return already-unpadded estimates)
        for jidx in range(len(this_approx)):
            for ridx in range(len(this_approx[jidx])):
                X_denoise += this_approx[jidx][ridx]
    # average over all runs (applies to both branches)
    X_denoise /= float(n_runs)
    return X_denoise
def bird(X, scales, n_runs, p_above, max_iter=100, random_state=None,
n_jobs=1, memory=Memory(None), verbose=False):
""" The BIRD algorithm as described in the paper
Parameters
----------
X : array, shape (n_channels, n_times)
The numpy n_channels-vy-N array to be X_denoised where n_channels
is number of sensors and n_times the dimension
scales : list
The list of MDCT scales that will be used to built the
dictionary Phi
n_runs : int
the number of runs (n_runs in the paper)
p_above : float
probability of appearance of the max above which the noise hypothesis
is | |
csvfile:
writer = csv.writer(csvfile)
writer.writerow(["model_name",
"criteria",
"weights",
"normalised_scores",
"tot_metabolic",
"percentage_Biobrick",
"allowed_KOs",
"reactions_KOs",
"objective_value"])
writer.writerow(["#identifier of the model: the first number indicate the variant consumption's reaction search, this is followed by the identifier of the target, finally, the last number indicates the variant for production's reaction search",
"#criteria used to compare the model variants between each other. Default ones are fluxk thorught the exchange reaction of the substrate (minimized), production rate fo the target (maximized), number of reactions knock ins (minimized), MDF value (maximised), length of the pathway (minimized)",
"#user selected weights for each criteria as assigned undr the column 'weight' in the input file",
"#normalized scores for each criteria. Each normalized score ranges from 0 to 1, with 1 indicating the best among the model variants",
"#final score of each model. Derived by multiplying the normed scores by the weight of theri criteria. The final scores ranges from 0 to 1. Scores closer to 1 indicate the most favourable engineering strategies according to the computational metabolic analysis",
"#percentage indicating the number of biobricks found per number of reactions knock ins suggested",
"#number of reaction knock outs allowed per optknock analysis round",
"#BiGG reactions identifiers for the knock outs found with optknock analysis.",
"#objective value resulting from running FBA on the model when the knock-outs are applied. The objective of this optimization is the production rate of the target"])
#for key, value in ranges[1].items():
# if max(value) == min(value):
# print('For {} the normalization should be done with -1000 and 1000'.format(key))
# else:
# print('For {} the normalization has to be done using {} as min and {} as max value'.format(key, min(value), max(value)))
scores_list = []
score_dict = {}
#for key, value in ranges[0].items():
# if max(value) == min(value):
# print('For {} the normalization should be done with -1000 and 1000'.format(key))
# else:
# print('For {} the normalization has to be done using {} as min and {} as max value'.format(key, min(value), max(value)))
list_single_scores = []
for n in range(1, len(output_consumption)+1):
print('---------'+str(n)+'---------')
for key, values in output_con_and_prod['production_'+str(n)].items():
#for m in range(1, len(output_con_and_prod['production_'+str(n)])+1):
if '_Run_' in key:
dict_KI = values[4]
for entry in list(dict_KI):
if dict_KI[entry] == 0:
dict_KI.pop(entry)
number_of_interventions = len(dict_KI)
else:
list_KI_cons = output_con_and_prod['consumption_'+str(n)][1][0]
number_of_interventions = len(list_KI_cons)
if '_Run_' in key:
dict_production = values[3].values()
for i in dict_production:
production_val = i
logging.debug(production_val)
else:
target = to_produce[0]
production_val = values['EX_'+target+'_e flux']
logging.debug('consumption')
scores_consumption={}
if '_Run_' not in key:
dict_consumption = output_con_and_prod['consumption_'+str(n)][1][3]
for c in to_consume:
range_tuple = ranges_per_criteria[c]
print(c, range_tuple)
value = dict_consumption['EX_'+c+'_e']
score = (float(range_tuple[1])-float(value))/(float(range_tuple[1])-float(range_tuple[0]))
scores_consumption[c]=score
list_single_scores.append(score)
logging.debug(score)
else:
dict_consumption = values[6]
for c in to_consume:
range_tuple = ranges_per_criteria[c]
print(c, range_tuple)
value = dict_consumption['EX_'+c+'_e']
score = (float(range_tuple[1])-float(value))/(float(range_tuple[1])-float(range_tuple[0]))
scores_consumption[c]=score
list_single_scores.append(score)
logging.debug(score)
logging.debug(scores_consumption)
logging.debug('production')
scores_production={}
for p in to_produce:
range_tuple = ranges_per_criteria[p]
print(p, range_tuple)
value = production_val
score = (value-(range_tuple[0]))/(range_tuple[1]-range_tuple[0])
scores_production[p] = score
list_single_scores.append(score)
logging.debug(score)
logging.debug('interventions')
if len(values) == 9:
number_of_interventions = number_of_interventions + values[8]
#logging.debug('Number of Interventions', ranges_per_criteria['Interventions'])
if ranges_per_criteria['Interventions'] == None:
Sh = 1
print('score_c: ', scores_consumption, 'score_p: ', scores_production, 'score_KI: ', Sh)
score_total = float(Sh)
else:
logging.debug(ranges_per_criteria['Interventions'][1])
Sh = (int(ranges_per_criteria['Interventions'][1]) - number_of_interventions)/(int(ranges_per_criteria['Interventions'][1])-int(ranges_per_criteria['Interventions'][0]))
print('score_c: ', scores_consumption, 'score_p: ', scores_production, 'score_KI: ', Sh)
Wh = dict_w['interventions']
logging.debug(Wh)
score_total = float(Sh)*float(Wh)
list_single_scores.append(Sh)
logging.debug(score_total)
for x in scores_consumption.keys():
score_total += float(scores_consumption[x])*float(dict_w[x])
for y in scores_production.keys():
score_total += float(scores_production[y])*float(dict_w[y])
if '_Run_' in key:
#TODO: in function for final dict add considering mdf and path lenght even if no KI are needed for production
#---- INSERT VALUE MDF -----
mdf = values[5]['mdf']
logging.debug('mdf')
if mdf != None:
range_val = ranges_per_criteria['MDF']
Smdf = (float(mdf) -(range_val[0]))/(range_val[1]-(range_val[0]))
list_single_scores.append(Smdf)
score_total += (float(Smdf)*float(dict_w['MDF']))
logging.debug(Smdf)
logging.debug(score_total)
else:
mdf = 0
list_single_scores.append(mdf)
score_total += (float(mdf)*float(dict_w['MDF']))
logging.debug(score_total)
#-----INSERT PATHWAY LENGTH ----
path_length = values[5]['pathway_length']
logging.debug('pl')
if path_length != None:
range_val = ranges_per_criteria['path_length']
Spath_length = (range_val[1]-path_length)/(range_val[1]-(range_val[0]))
list_single_scores.append(Spath_length)
score_total += float(Spath_length)*float(dict_w['path_length'])
logging.debug(Spath_length)
logging.debug(score_total)
else:
path_length = 0
list_single_scores.append(path_length)
score_total += float(path_length)*float(dict_w['path_length'])
logging.debug(score_total)
else:
mdf = values['thermodynamic']['mdf']
logging.debug('mdf')
if mdf != None:
range_val = ranges_per_criteria['MDF']
Smdf = (float(mdf) -(range_val[0]))/(range_val[1]-(range_val[0]))
list_single_scores.append(Smdf)
score_total += (float(Smdf)*float(dict_w['MDF']))
logging.debug(Smdf)
logging.debug(score_total)
else:
mdf = 0
list_single_scores.append(mdf)
score_total += (float(mdf)*float(dict_w['MDF']))
logging.debug(score_total)
#-----INSERT PATHWAY LENGTH ----
path_length = values['thermodynamic']['pathway_length']
logging.debug('pl')
if path_length != None:
range_val = ranges_per_criteria['path_length']
Spath_length = (range_val[1]-path_length)/(range_val[1]-(range_val[0]))
list_single_scores.append(Spath_length)
score_total += float(Spath_length)*float(dict_w['path_length'])
logging.debug(Spath_length)
logging.debug(score_total)
else:
path_length = 0
list_single_scores.append(path_length)
score_total += float(path_length)*float(dict_w['path_length'])
logging.debug(score_total)
scores_list.append(score_total)
#print('\n', score_total)
tot_metab_score = [score_total]
score_dict[str(n)+'_'+key] = score_total
for i in range(len(criteria)):
if (i+1) <= len(tot_metab_score):
writer.writerow([(str(n)+'_'+key), criteria[i], weights[i], list_single_scores[i], tot_metab_score[i]])
elif (i+1) > len(tot_metab_score):
writer.writerow(['', criteria[i], weights[i], list_single_scores[i]])
writer.writerow(['-----------------------------------------------------'])
writer.writerow(['rank_position', 'model_name', 'final_metabolic_score'])
writer.writerow(['#position on the model variants once ranked by the final scores in decreasing order',
"#identifier of the model: the first number indicate the variant consumption's reaction search, this is followed by the identifier of the target, finally, the last number indicates the variant for production's reaction search",
"#final score of each model. Derived by multiplying the normed scores by the weight of theri criteria. The final scores ranges from 0 to 1. Scores closer to 1 indicate the most favourable engineering strategies according to the computational metabolic analysis"])
#summary_output(n, dict_w, (str(n)+'_'+key), list_single_scores, tot_metab_score)
return scores_list, score_dict
def scores_evaluations(input_file, output_consumption, output_con_and_prod):
    """
    Rank the different engineering strategies
    This function parses the output of generate_scores function and
    ranks the model variants representing different engineering
    strategies by total scores in decreasing order. The model at
    position 1 is the one considered the best based on the
    "metabolic criteria": consumption and production rate, number of
    knock-ins, MDF, pathway length.
    -----------------------------------------------------------------
    Arguments:
    input_file--str, input file in .csv format dictionary like
    output_consumption--dict returned by analysis_gf_sol function
    output_con_and_prod--dict returned by cons_prod_dict function
    Return:
    final_output_scores: dict with the ranked variants. The rank
        positions (1-based ints) are the keys, while the values are
        tuples with model name and its final score.
    """
    scoring_info = generate_scores(input_file, output_consumption, output_con_and_prod)
    logging.debug(scoring_info)
    scores = scoring_info[0]
    models = list(scoring_info[1].keys())
    # Indices of the models sorted by decreasing score: entry k holds the
    # index of the model ranked at position k + 1. This replaces the
    # original rank-matrix construction plus O(n^3) nested lookup loops
    # (which also assigned final_rank twice) with one argsort pass.
    order = (-numpy.array(scores)).argsort()
    final_output_scores = {}
    for position, model_idx in enumerate(order, start=1):
        model = models[model_idx]
        score = scores[model_idx]
        print("The final position in the ranking for model {} is {} and its final score is {}".format(model, position, score))
        final_output_scores[position] = (model, score)
    return final_output_scores
def score_BB_presence(input_file, to_wiki, from_wiki, scores_output):
"""
Get percentage of matching biobrick per model variant
Counts the number of knock ins involved in each different
engineering strategy (model variant) and it counts the number
of biobrick matchning the reaction function were found by
querying the wikidata biobrick database.
It results the percentage of matching biobricks.
-----------------------------------------------------------------
Arguments:
input_file--str, input file in .csv format dictionary like
to_wiki--list of lists with information on EC number
and KEGG ID of each heterologous reaction
from wiki--dict type (nested dictionary) returned from
wikidata query function.
scores_output--dict with the ranked variant. The rank
positons are the keys, while the values are tuples with
model name and its finals score.
Return:
BB_scores: list of tuples in which the object with index 0 is
the name of the model variant, while index 1 indicate
the percentage of biobricks
"""
# Counts the number of heterologous reactions (Knock Ins)
n=0
tot_heterologous = []
for i in to_wiki:
n+=1
logging.debug('---{}---'.format(n))
n_heterologous_reactions = len(i)
#for reaction_info in i.values():
#reaction_info['EC']
logging.debug(n_heterologous_reactions)
tot_heterologous.append(n_heterologous_reactions)
# Counts the number of Biobricks (BB) matching the heterologous reactions
n=0
tot_BB = []
for j in from_wiki:
n+=1
logging.debug('---{}---'.format(n))
n_BB = len(j)
for key, subdict in j.items():
if subdict['BB_name'] == None:
logging.debug('What a shame! no BB for {}'.format(key))
n_BB = n_BB - 1
logging.debug(n_BB)
tot_BB.append(n_BB)
BB_scores = []
# Calculate score and add to the scoring dictionary
for position, tup in scores_output.items():
raw_fraction = tot_BB[position-1] / tot_heterologous[position-1]
#normalized = (raw_fraction - | |
to get SMB information about the OS if possible.
"""
nonlocal host, smb_elem
os_name, cpe, samba_version = "", "", ""
# find and parse CPE strings
cpe_elem = smb_elem.find(".//elem[@key='cpe']")
if cpe_elem is not None:
# Assumption: script only returns one CPE (check if True)
cpe = cpe_elem.text
# find and parse the OS name information contained in the actual output string
output = smb_elem.attrib["output"].strip()
for stmt in output.split("\n"):
stmt = stmt.strip()
key, value = stmt.split(":", 1)
if key == "OS":
os_name = value.split("(", 1)[0].strip()
# try to extract Samba version
match = re.match(r".*\(Samba (\d+\.\d+\.\d+[a-z]?).*\)", value.strip())
if match:
samba_version = match.group(1)
# only add OS info to host if OS name was found
if os_name:
host["os_smb_discv"] = [{"name": os_name}]
if cpe:
host["os_smb_discv"][0]["cpe"] = cpe
# add Samba version to every Samba CPE (assumption: only one version is in use)
if samba_version:
for proto in ("tcp", "udp"):
for portid, port_node in host.get(proto, {}).items():
for i, cpe in enumerate(port_node.get("cpes", [])):
if cpe == "cpe:/a:samba:samba":
cpe += ":" + samba_version
port_node["cpes"][i] = cpe
################################################
#### Main code of Nmap XML parsing function ####
################################################
try:
nm_xml_tree = ET.parse(filepath)
except ET.ParseError:
print("Could not parse file created by Nmap scan. Skipping Nmap scan ...", file=sys.stderr)
return {}
nmaprun_elem = nm_xml_tree.getroot()
hosts = []
# parse every host element
for host_elem in nmaprun_elem.findall("host"):
host = {}
status_elem = host_elem.find("status")
if status_elem is not None:
if "state" in status_elem.attrib:
if status_elem.attrib["state"] == "down":
continue
parse_addresses()
parse_osmatches()
parse_port_information()
# parse additional script information
hostscript_elem = host_elem.find("hostscript")
if hostscript_elem:
# parse smb-os-discovery information
smb_elem = hostscript_elem.find(".//script[@id='smb-os-discovery']")
if smb_elem:
parse_smb_script_information()
hosts.append(host)
return hosts
def transform_to_avain_scan_format(parsed_host: dict):
    """
    Transform the Nmap scan results to the AVAIN scan result format.
    """
    # Pick the most likely OS first, then carry over the raw address and
    # port fields unchanged before normalizing the port info keys.
    host = {"os": select_os(parsed_host)}
    for field in ("ip", "mac", "tcp", "udp"):
        host[field] = parsed_host[field]
    adjust_port_info_keys(host)
    return host
def select_os(parsed_host: dict):
    """
    Out of all suggested OSs for the given host from Nmap, select the most
    likely one using cosine similarity string matching. First a string of
    relevant information is created, second the OS whose information is the
    most similar to the matching string is returned.
    """
    global CREATED_FILES, DETECTED_OSS, OS_SIM_SCORES
    # create cosine similarity matching string and store
    matching_string = create_sim_matching_string(parsed_host)
    with open(MATCH_STR_PATH, "w") as file:
        file.write(matching_string)
    CREATED_FILES.append(MATCH_STR_PATH)
    potential_oss = extract_oss(parsed_host)
    # Put vendor name in front of name if not existent
    for pot_os in potential_oss:
        if "cpes" in pot_os:
            cpes = pot_os["cpes"]
            vendor = ""
            # assumption: Nmap does not group two OS CPEs with different vendors
            if cpes:
                # skip the 7-char "cpe:/x:" prefix, then the vendor is the
                # text up to the next colon
                cpe_vend = cpes[0][7:]
                vendor = cpe_vend[:cpe_vend.find(":")]
            if vendor and not pot_os["name"].lower().startswith(vendor):
                pot_os["name"] = vendor[0].upper() + vendor[1:] + " " + pot_os["name"]
    DETECTED_OSS[parsed_host["ip"]["addr"]] = potential_oss
    # compute similarities of potential OSs to matching string
    os_sim_scores = []
    is_os, highest_sim, = None, -1
    for pot_os in potential_oss:
        # NOTE(review): sim_sum starts at -1 (not 0), biasing every averaged
        # similarity downward by 1/word-count — confirm whether intentional.
        cur_name, sim_sum = "", -1
        for word in pot_os["name"].split(" "):
            # compare the incrementally growing name prefix, not each word alone
            cur_name += word.lower()
            cur_sim = util.compute_cosine_similarity(matching_string, cur_name)
            sim_sum += cur_sim
            cur_name += " "
        sim = sim_sum / len(pot_os["name"].split(" "))
        if pot_os.get("cpes", []):
            # blend in the average CPE similarity when CPEs are available
            avg_cpe_sim = sum(util.compute_cosine_similarity(matching_string, cpe[7:].lower())
                              for cpe in pot_os["cpes"]) / len(pot_os["cpes"])
            sim = (sim + avg_cpe_sim) / 2
        # weight by Nmap's reported accuracy percentage
        sim *= float(pot_os["accuracy"])/100
        # print("%s --> %f with %s%%" % (pot_os["name"], sim, pot_os["accuracy"]))
        os_sim_scores.append((pot_os, sim))
        # iteratively save the OS with the highest similarity to the matching string
        if sim > highest_sim:
            highest_sim = sim
            is_os = pot_os
    # store OS sim scores
    OS_SIM_SCORES[parsed_host["ip"]["addr"]] = os_sim_scores
    if is_os:
        return is_os
    return {"name": "", "cpes": []}
def create_sim_matching_string(parsed_host: dict):
    """
    Build a lowercase text blob of every piece of OS-related information
    of the host, used as the cosine-similarity matching string.
    """
    tokens = []

    def collect(obj: dict, field: str):
        """Append a dict value to the token list if its key exists."""
        if field in obj:
            tokens.append(obj[field].lower())

    def collect_ports(protocol: str):
        """Collect service product and name info for every port of *protocol*."""
        for portinfo in parsed_host.get(protocol, {}).values():
            collect(portinfo, "product")
            collect(portinfo, "name")

    # OS match information
    for osmatch in parsed_host.get("osmatches", []):
        collect(osmatch, "name")
        for osclass in osmatch.get("osclasses", []):
            tokens.extend(cpe.lower() for cpe in osclass.get("cpes", []))
            collect(osclass, "osfamily")
            collect(osclass, "osgen")
            collect(osclass, "vendor")
    # service-info OS information
    for os_si in parsed_host.get("os_si", []):
        tokens.extend(cpe.lower() for cpe in os_si.get("cpes", []))
        collect(os_si, "name")
    # smb-os-discovery information (if any exists)
    for os_smb_discv in parsed_host.get("os_smb_discv", []):
        if "cpe" in os_smb_discv:
            tokens.append(os_smb_discv["cpe"].lower())
        collect(os_smb_discv, "name")
    # select port infos
    collect_ports("tcp")
    collect_ports("udp")
    # MAC vendor
    if "mac" in parsed_host:
        collect(parsed_host["mac"], "vendor")
    # every token is followed by a single space, as in the original builder
    return "".join(token + " " for token in tokens)
def extract_oss(parsed_host: dict):
"""
Return a list of potential OSs for the given host. More broad OSs are replaced
by more concrete ones. E.g. within potential_oss, Windows is replaced by Windows 10.
"""
########################################
#### Definition of helper functions ####
########################################
def add_direct_oss():
"""Add the OSs found in the osmatches to poential_oss"""
nonlocal parsed_host, potential_oss
if "osmatches" in parsed_host:
for osmatch in parsed_host["osmatches"]:
if "osclasses" in osmatch:
for osclass in osmatch["osclasses"]:
name = ""
if "vendor" in osclass:
name += osclass["vendor"] + " "
if "osfamily" in osclass:
name += osclass["osfamily"] + " "
if "osgen" in osclass:
name += osclass["osgen"]
name = name.strip()
if osclass.get("cpes", []):
for cpe in osclass["cpes"]:
store_os = True
replace_accuracy = 0
if potential_oss:
for i, pot_os in enumerate(potential_oss):
# if this cpe is substring of another OS's cpe
if any(cpe in pot_cpe for pot_cpe in pot_os["cpes"]):
store_os = False
# if this cpe is a true superstring of another OS's cpe
if any(pot_cpe in cpe and not cpe == pot_cpe for pot_cpe in pot_os["cpes"]):
store_os = True
if int(pot_os["accuracy"]) > int(replace_accuracy):
replace_accuracy = pot_os["accuracy"]
del potential_oss[i]
if store_os:
accuracy = str(max([int(osclass["accuracy"]), int(replace_accuracy)]))
potential_oss.append({"name": name, "cpes": osclass["cpes"],
"accuracy": accuracy, "type": osclass.get("type", "")})
break
else:
if not any(name in pot_os["name"] for pot_os in potential_oss):
potential_oss.append({"name": name, "cpes": [], "accuracy": osclass["accuracy"],
"type": osclass.get("type", "")})
def add_potential_oss_from_service(dict_key: str):
"""
Evaluate nmap Service Info OS information and append result to potential OSs
:param dict_key: the key that identifies the service field within a host dict
"""
nonlocal parsed_host, potential_oss
added_in_service = set()
if dict_key in parsed_host:
for service_elem in parsed_host[dict_key]:
# first check if the OS information of this service contains a more broad OS
found_supstring = False
if "cpe" in service_elem:
service_elem["cpes"] = [service_elem["cpe"]]
if "cpes" in service_elem:
# check if a CPE of the current service is a prefix of a CPE already saved in potential_oss
for cpe in service_elem["cpes"]:
for pot_os in potential_oss:
if "cpes" in pot_os:
if any(cpe in pot_cpe for pot_cpe in pot_os["cpes"]):
found_supstring = True
break
if found_supstring:
break
replaced_os = False
# now check for a substring of name or CPE in potential_oss, i.e. a broad OS
potential_os_cpy = copy.deepcopy(potential_oss)
for i, pot_os in enumerate(potential_os_cpy):
pot_os_cmp = pot_os["name"].replace(" ", "").lower()
service_os_cmp = service_elem["name"].replace(" ", "").lower()
# do OS comparison by name
if pot_os_cmp != service_os_cmp and pot_os_cmp in service_os_cmp:
del potential_oss[i]
new_pot_os = {"name": service_elem["name"], "accuracy": "100",
"type": service_elem.get("devicetype", "")}
if "cpes" in service_elem:
new_pot_os["cpes"] = service_elem["cpes"]
if not replaced_os:
potential_oss.insert(i, new_pot_os)
replaced_os = True
break
# if this OS of potential_oss has a CPE that is a prefix
# of a CPE of the current OS mentioned in the services
elif "cpes" in service_elem and "cpes" in pot_os:
for cpe in service_elem["cpes"]:
if any(pot_cpe in cpe for pot_cpe in pot_os["cpes"]):
del potential_oss[i]
new_pot_os = {"name": service_elem["name"], "cpes": service_elem["cpes"],
"accuracy": "100", "type": service_elem.get("devicetype", "")}
if not replaced_os:
potential_oss.insert(i, new_pot_os)
replaced_os = True
| |
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import uuid
import re
from functools import partial, wraps
from sqlalchemy import and_
from sqlalchemy.orm import aliased
from flask import Blueprint, abort, request, url_for, make_response
from flask import jsonify, Flask, current_app
from flask_mail import Message
from jsonschema.exceptions import ValidationError
from invenio_db import db
from invenio_pidstore import current_pidstore
from invenio_pidstore.resolver import Resolver
from invenio_pidstore.errors import PIDDoesNotExistError, PIDRedirectedError
from invenio_pidstore.models import PersistentIdentifier
from invenio_pidrelations.contrib.versioning import PIDNodeVersioning
from invenio_pidrelations.models import PIDRelation
from invenio_records_files.api import Record
from invenio_rest.errors import RESTValidationError
from invenio_search import RecordsSearch
from invenio_records.models import RecordMetadata
from invenio_records_files.api import RecordsBuckets
from invenio_records_rest.views import (pass_record,
RecordsListResource, RecordResource,
RecordsListOptionsResource,
SuggestResource)
from invenio_records_rest.links import default_links_factory
from invenio_records_rest.query import default_search_factory
from invenio_records_rest.utils import obj_or_import_string
from invenio_mail import InvenioMail
from invenio_mail.tasks import send_email
from invenio_rest import ContentNegotiatedMethodView
from invenio_accounts.models import User
from .providers import RecordUUIDProvider
from .permissions import DeleteRecordPermission
from .proxies import current_records_rest
# duplicated from invenio-records-rest because we need
# to pass the previous version record data
def verify_record_permission(permission_factory, record, **kwargs):
    """Abort the request unless the current user may access *record*.

    Emits HTTP 401 for anonymous users and HTTP 403 for authenticated
    users that lack the permission; returns None when access is granted.

    :param permission_factory: permission factory used to check permissions.
    :param record: record whose access is limited.
    """
    permission = permission_factory(record=record, **kwargs)
    # ``can()`` must be called explicitly: the permission object overloads
    # boolean operations, so a plain truthiness test is unreliable.
    if permission.can():
        return
    from flask_login import current_user
    abort(401 if not current_user.is_authenticated else 403)
"""Create Invenio-Records-REST blueprint."""
# NOTE(review): the bare string above is a detached docstring — it sits at
# module level just before the blueprint definition and likely belongs to
# ``create_blueprint`` below; confirm before relocating it.
# Module-level blueprint that create_blueprint() populates with URL rules.
blueprint = Blueprint(
    'b2share_records_rest',
    __name__,
    url_prefix='',
)
def create_blueprint(endpoints):
    """Register the URL rules of every configured endpoint on the
    module-level blueprint and return it."""
    endpoints = endpoints or {}
    for endpoint, options in endpoints.items():
        rules = create_url_rules(endpoint, **options)
        for rule in rules:
            blueprint.add_url_rule(**rule)

    # turn record validation errors into REST error responses
    @blueprint.errorhandler(ValidationError)
    def validation_error(error):
        """Catch validation errors."""
        return RESTValidationError().get_response()

    return blueprint
def create_url_rules(endpoint, list_route=None, item_route=None,
                     pid_type=None, pid_minter=None, pid_fetcher=None,
                     read_permission_factory_imp=None,
                     create_permission_factory_imp=None,
                     update_permission_factory_imp=None,
                     delete_permission_factory_imp=None,
                     record_class=None,
                     record_serializers=None,
                     record_loaders=None,
                     search_class=None,
                     search_serializers=None,
                     search_index=None, search_type=None,
                     default_media_type=None,
                     max_result_window=None, use_options_view=True,
                     search_factory_imp=None, links_factory_imp=None,
                     suggesters=None):
    """Create Werkzeug URL rules.
    :param endpoint: Name of endpoint.
    :param list_route: record listing URL route . Required.
    :param item_route: record URL route (must include ``<pid_value>`` pattern).
        Required.
    :param pid_type: Persistent identifier type for endpoint. Required.
    :param template: Template to render. Defaults to
        ``invenio_records_ui/detail.html``.
    :param read_permission_factory_imp: Import path to factory that creates a
        read permission object for a given record.
    :param create_permission_factory_imp: Import path to factory that creates a
        create permission object for a given record.
    :param update_permission_factory_imp: Import path to factory that creates a
        update permission object for a given record.
    :param delete_permission_factory_imp: Import path to factory that creates a
        delete permission object for a given record.
    :param search_index: Name of the search index used when searching records.
    :param search_type: Name of the search type used when searching records.
    :param record_class: Name of the record API class.
    :param record_serializers: serializers used for records.
    :param search_serializers: serializers used for search results.
    :param default_media_type: default media type for both records and search.
    :param max_result_window: maximum number of results that Elasticsearch can
        provide for the given search index without use of scroll. This value
        should correspond to Elasticsearch ``index.max_result_window`` value
        for the index.
    :param use_options_view: Determines if a special option view should be
        installed.
    :returns: a list of dictionaries with can each be passed as keywords
        arguments to ``Blueprint.add_url_rule``.
    """
    # resolve dotted import paths (or pass objects straight through) for
    # every permission factory and for the links factory
    read_permission_factory = obj_or_import_string(
        read_permission_factory_imp
    )
    create_permission_factory = obj_or_import_string(
        create_permission_factory_imp
    )
    update_permission_factory = obj_or_import_string(
        update_permission_factory_imp
    )
    delete_permission_factory = obj_or_import_string(
        delete_permission_factory_imp
    )
    links_factory = obj_or_import_string(
        links_factory_imp, default=default_links_factory
    )
    record_class = obj_or_import_string(
        record_class, default=Record
    )
    search_class = obj_or_import_string(
        search_class, default=RecordsSearch
    )
    # index / doc_type either override the search class defaults or are
    # read back from the class ``Meta`` when not given explicitly
    search_class_kwargs = {}
    if search_index:
        search_class_kwargs['index'] = search_index
    else:
        search_index = search_class.Meta.index
    if search_type:
        search_class_kwargs['doc_type'] = search_type
    else:
        search_type = search_class.Meta.doc_types
    if search_class_kwargs:
        search_class = partial(search_class, **search_class_kwargs)
    # resolve per-mimetype loader/serializer import paths
    if record_loaders:
        record_loaders = {mime: obj_or_import_string(func)
                          for mime, func in record_loaders.items()}
    record_serializers = {mime: obj_or_import_string(func)
                          for mime, func in record_serializers.items()}
    search_serializers = {mime: obj_or_import_string(func)
                          for mime, func in search_serializers.items()}
    # resolver also returns deleted records (with_deleted=True) so item
    # views can answer with tombstones instead of 404s
    resolver = Resolver(pid_type=pid_type, object_type='rec',
                        getter=partial(record_class.get_record,
                                       with_deleted=True))
    # import deposit here in order to avoid dependency loop
    from b2share.modules.deposit.api import Deposit
    from b2share.modules.deposit.serializers import json_v1_response as deposit_serializer
    list_view = B2ShareRecordsListResource.as_view(
        RecordsListResource.view_name.format(endpoint),
        resolver=resolver,
        minter_name=pid_minter,
        pid_type=pid_type,
        pid_fetcher=pid_fetcher,
        read_permission_factory=read_permission_factory,
        create_permission_factory=create_permission_factory,
        # replace the record serializer with deposit serializer as it
        # is used only when the deposit is created.
        record_serializers={
            'application/json': deposit_serializer
        },
        record_loaders=record_loaders,
        search_serializers=search_serializers,
        search_class=search_class,
        default_media_type=default_media_type,
        max_result_window=max_result_window,
        search_factory=(obj_or_import_string(
            search_factory_imp, default=default_search_factory
        )),
        item_links_factory=links_factory,
        record_class=Deposit,
    )
    item_view = B2ShareRecordResource.as_view(
        B2ShareRecordResource.view_name.format(endpoint),
        resolver=resolver,
        read_permission_factory=read_permission_factory,
        update_permission_factory=update_permission_factory,
        delete_permission_factory=delete_permission_factory,
        serializers=record_serializers,
        loaders=record_loaders,
        search_class=search_class,
        links_factory=links_factory,
        default_media_type=default_media_type)
    versions_view = RecordsVersionsResource.as_view(
        RecordsVersionsResource.view_name.format(endpoint),
        resolver=resolver)
    abuse_view = RecordsAbuseResource.as_view(
        RecordsAbuseResource.view_name.format(endpoint),
        resolver=resolver)
    access_view = RequestAccessResource.as_view(
        RequestAccessResource.view_name.format(endpoint),
        resolver=resolver)
    views = [
        dict(rule=list_route, view_func=list_view),
        dict(rule=item_route, view_func=item_view),
        dict(rule=item_route + '/abuse', view_func=abuse_view),
        dict(rule=item_route + '/accessrequests', view_func=access_view),
        # Special case for versioning as the parent PID is redirected.
        dict(rule='/api/records/<pid_value>/versions', view_func=versions_view),
    ]
    if suggesters:
        suggest_view = SuggestResource.as_view(
            SuggestResource.view_name.format(endpoint),
            suggesters=suggesters,
            search_class=search_class,
        )
        views.append(dict(
            rule=list_route + '_suggest',
            view_func=suggest_view
        ))
    if use_options_view:
        options_view = RecordsListOptionsResource.as_view(
            RecordsListOptionsResource.view_name.format(endpoint),
            search_index=search_index,
            max_result_window=max_result_window,
            default_media_type=default_media_type,
            search_media_types=search_serializers.keys(),
            item_media_types=record_serializers.keys(),
        )
        return [
            dict(rule="{0}_options".format(list_route), view_func=options_view)
        ] + views
    return views
class MyContentNegotiatedMethodView(ContentNegotiatedMethodView):
    """MethodView with content negotiation.
    Dispatch HTTP requests as MethodView does and build responses using the
    registered serializers. It chooses the right serializer using the request's
    accept type. It also provides a helper method for handling ETags.
    """

    def __init__(self, serializers=None, method_serializers=None,
                 serializers_query_aliases=None, default_media_type=None,
                 default_method_media_type=None, *args, **kwargs):
        """Register the serializing functions.
        Serializing functions will receive all named and non named arguments
        provided to ``make_response`` or returned by request handling methods.
        Recommended prototype is: ``serializer(data, code=200, headers=None)``
        and it should return :class:`flask.Response` instances.
        Serializing functions can also be overridden by setting
        ``self.serializers``.
        :param serializers: A mapping from mediatype to a serializer function.
        :param method_serializers: A mapping of HTTP method name (GET, PUT,
            PATCH, POST, DELETE) -> dict(mediatype -> serializer function). If
            set, it overrides the serializers dict.
        :param serializers_query_aliases: A mapping of values of the defined
            query arg (see `config.REST_MIMETYPE_QUERY_ARG_NAME`) to valid
            mimetypes: dict(alias -> mimetype).
        :param default_media_type: Default media type used if no accept type
            has been provided and global serializers are used for the request.
            Can be None if there is only one global serializer or None. This
            media type is used for method serializers too if
            ``default_method_media_type`` is not set.
        :param default_method_media_type: Default media type used if no accept
            type has been provided and a specific method serializers are used
            for the request. Can be ``None`` if the method has only one
            serializer or ``None``.
        """
        # NOTE(review): *args/**kwargs are accepted but not forwarded to the
        # parent initializer — any extra MethodView init arguments are
        # silently dropped; confirm this is intended.
        super(MyContentNegotiatedMethodView, self).__init__()
        self.serializers = serializers or None
        self.default_media_type = default_media_type
        self.default_method_media_type = default_method_media_type or {}
        # set default default media_types if none has been given
        if self.serializers and not self.default_media_type:
            if len(self.serializers) == 1:
                self.default_media_type = next(iter(self.serializers.keys()))
            elif len(self.serializers) > 1:
                # ambiguous: several global serializers but no way to pick one
                raise ValueError('Multiple serializers with no default media'
                                 ' type')
        # set method serializers
        self.method_serializers = ({key.upper(): func for key, func in
                                    method_serializers.items()} if
                                   method_serializers else {})
        # set serializer aliases
        self.serializers_query_aliases = serializers_query_aliases or {}
        # create default method media_types dict if none has been given
        if self.method_serializers and not self.default_method_media_type:
            self.default_method_media_type = {}
            for http_method, meth_serial in self.method_serializers.items():
                if len(self.method_serializers[http_method]) == 1:
                    # single serializer: it becomes the method default
                    self.default_method_media_type[http_method] = \
                        next(iter(self.method_serializers[http_method].keys()))
                elif len(self.method_serializers[http_method]) > 1:
                    # try to use global default media type
                    if default_media_type in \
                            self.method_serializers[http_method]:
                        self.default_method_media_type[http_method] = \
                            default_media_type
                    else:
                        raise ValueError('Multiple serializers for method {0}'
                                         'with no default media type'.format(
                                             http_method))
class B2ShareRecordsListResource(MyContentNegotiatedMethodView):
"""Resource for records listing."""
view_name | |
<filename>Project/Project.py
'''
LELEC2870 - Machine Learning: Project code
Date: 20/12/2019
Authors:
- <NAME>
- <NAME>
Non standard package to install with pip:
- torch
- tikzplotlib (optional)
'''
# --- Experiment toggles ----------------------------------------------------
# compute_* flags: when True the corresponding (slow) experiment is re-run
# and its result arrays are written to the numpy/ cache directory; when
# False the cached .npy results are loaded instead.
# save_* flags: when True the matching matplotlib figure is additionally
# exported to a LaTeX/TikZ file via tikzplotlib.
save_mut_corr = False
compute_PCA = False
save_PCA = False
compute_bootstrap = False
save_bootstrap = False
compute_lasso = False
save_lasso = False
compute_RR = False
save_RR = False
compute_tree = False
save_tree = False
compute_adaboost_tree = False
save_adaboost_tree = False
compute_rand_tree = False
save_rand_tree = False
compute_boost_tree = False
save_boost_tree = False
compute_KNN = False
save_KNN = False
compute_MLP_neurons = False
compute_MLP_epochs = False
compute_MLP_best = False
save_MLP = False
import numpy as np
import math
import tikzplotlib
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
import seaborn as sns
from sklearn.metrics import mutual_info_score
import pandas as pd
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsRegressor
from mlxtend.evaluate import bootstrap_point632_score
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.dataset import Dataset
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
def load_data():
    """Read the project CSVs and return (raw X1, processed X1, Y1, processed X2).

    Processing adds engineered time and wind features, drops the raw
    time/wind columns and standardizes every remaining column.
    """

    def _preprocess(frame):
        # engineered features first (both helpers mutate and return the frame)
        frame = wd_to_coord(time_to_coord(frame))
        trimmed = frame.drop(columns=["wd", "wdc", "year", "month", "day", "hour"])
        # standardize (zero mean, unit variance) while keeping column labels;
        # smaller error was observed with scale instead of normalize
        scaled = preprocessing.StandardScaler().fit_transform(trimmed)
        result = pd.DataFrame(scaled)
        result.columns = trimmed.columns
        return result

    raw_x1 = pd.read_csv("Datasets/X1.csv", sep=',')
    raw_x2 = pd.read_csv("Datasets/X2.csv", sep=',')
    processed_x1 = _preprocess(raw_x1)
    processed_x2 = _preprocess(raw_x2)
    targets = pd.read_csv("Datasets/Y1.csv", header=None, names=['PM2.5'])
    return raw_x1, processed_x1, targets, processed_x2
# New features: time, day time (Earth spin), year time (Earth rotation around Sun)
def time_to_coord(X1):
    """Append engineered time features to *X1* and return it (mutated in place).

    Adds five columns:
      - ``time``: seconds elapsed since 2013-01-01,
      - ``syear``/``cyear``: sine/cosine of the position within the year,
      - ``sday``/``cday``: sine/cosine of the hour within the day.

    Requires integer ``year``/``month``/``day``/``hour`` columns.
    """
    n_rows = X1.shape[0]
    time = np.zeros(n_rows)
    theta_day = np.zeros(n_rows)
    theta_year = np.zeros(n_rows)
    epoch = datetime(2013, 1, 1)  # hoisted: constant reference instant
    # Fix: enumerate() yields positional offsets, so the buffers are filled
    # correctly even when the DataFrame carries a non-default (e.g. filtered)
    # index; the original indexed the numpy arrays with the frame's labels.
    for pos, (_, row) in enumerate(X1.iterrows()):
        t = datetime(row['year'], row['month'], row['day'], row['hour'])
        time[pos] = (t - epoch).total_seconds()
        year_start = datetime(row['year'], 1, 1)
        year_seconds = (datetime(row['year'] + 1, 1, 1) - year_start).total_seconds()
        # angle of the instant within its year (handles leap years exactly)
        theta_year[pos] = 2 * np.pi * (t - year_start).total_seconds() / year_seconds
        theta_day[pos] = 2 * np.pi * row['hour'] / 24
    X1.insert(X1.shape[1], "time", time, True)
    X1.insert(X1.shape[1], "syear", np.sin(theta_year), True)
    X1.insert(X1.shape[1], "cyear", np.cos(theta_year), True)
    X1.insert(X1.shape[1], "sday", np.sin(theta_day), True)
    X1.insert(X1.shape[1], "cday", np.cos(theta_day), True)
    return X1
# Polar to cartesian wind coordinates
def wd_to_coord(X1):
    """Convert the categorical wind direction ``wd`` into cartesian features.

    Adds ``wdc`` (angle in radians, in 1/16-turn steps counter-clockwise
    from East) plus ``swd``/``cwd`` (sine/cosine of that angle, inserted at
    fixed column positions 13 and 14). Returns the mutated *X1*.
    """
    # compass label -> number of 1/16 turns counter-clockwise from East;
    # replaces the original 16-branch if/elif chain with one lookup
    sixteenths = {
        'E': 0, 'ENE': 1, 'NE': 2, 'NNE': 3, 'N': 4, 'NNW': 5, 'NW': 6,
        'WNW': 7, 'W': 8, 'WSW': 9, 'SW': 10, 'SSW': 11, 'S': 12,
        'SSE': 13, 'SE': 14,
    }
    for index, row in X1.iterrows():
        # any unmapped label (e.g. 'ESE' or missing data) falls back to
        # 15/16, matching the original chain's final else branch
        X1.at[index, 'wdc'] = 2 * np.pi * (sixteenths.get(row['wd'], 15) / 16)
    # 1-D vector: the original built an (n, 1) column matrix via
    # ``loc[:, ['wdc']]``, which newer pandas rejects in DataFrame.insert
    wd = X1.loc[:, 'wdc'].to_numpy()
    #wd_speed = X1.loc[:, ['WSPM']].to_numpy()
    sin_wd = np.sin(wd)# * wd_speed
    cos_wd = np.cos(wd)# * wd_speed
    X1.insert(13, "swd", sin_wd, True)
    X1.insert(14, "cwd", cos_wd, True)
    return X1
def rmse(predictions, targets):
    """Root-mean-squared error between *predictions* and *targets*."""
    mse = mean_squared_error(predictions, targets)
    return math.sqrt(mse)
# Correlation
def project_correlation(X, Y, fig=True):
    """Plot the absolute-correlation heatmap of X joined with Y.

    :param fig: NOTE(review) currently unused — the heatmap is always drawn
        and shown regardless of this flag; confirm intended behavior.
    :returns: the full (signed) correlation DataFrame.
    """
    print('Correlation...')
    XY = pd.concat([X, Y], axis=1)
    corr = XY.corr()
    # mask the upper triangle so only the lower half is displayed
    mask = np.zeros_like(corr)
    mask[np.triu_indices_from(mask)] = True
    with sns.axes_style("white"):
        sns.heatmap(np.abs(corr), mask=mask, annot=True, cmap=plt.cm.Reds, vmin=0, vmax=1, linewidths=.5).set_title('Correlation')
    if save_mut_corr:
        # export the current figure for inclusion in the LaTeX report
        tikzplotlib.save("LaTeX/correlation.tex")
    plt.show()
    return corr
# Mutual information (buggy, not the same)
def project_mutual_info(X, Y):
    """Plot the pairwise normalized mutual-information heatmap of X joined with Y.

    Mutual information is normalized by the geometric mean of the two
    self-informations (diagonal entries), analogous to a correlation.
    :returns: the normalized mutual-information DataFrame.
    """
    print('Mutual information...')
    XY = pd.concat([X, Y], axis=1)
    XY_col = XY.columns
    XY = XY.values
    n = XY.shape[1]
    mut = np.zeros((n, n))
    # the matrix is symmetric: compute the upper triangle, mirror the rest
    for i in range(n):
        for j in range(n):
            mut[i, j] = mutual_info_score(XY[:, i], XY[:, j]) if j >= i else mut[j, i]
    # normalize each entry by sqrt(MI(i,i) * MI(j,j))
    diag_mut = np.copy(np.diag(mut))
    for i in range(n):
        for j in range(n):
            mut[i, j] = mut[i, j] / math.sqrt(diag_mut[i] * diag_mut[j])
    mut = pd.DataFrame(mut)
    mut.columns = XY_col
    mut.index = mut.columns
    # mask the upper triangle so only the lower half is displayed
    mask = np.zeros_like(mut)
    mask[np.triu_indices_from(mask)] = True
    with sns.axes_style("white"):
        sns.heatmap(np.abs(mut), mask=mask, annot=True, cmap=plt.cm.Reds, vmin=0, vmax=1, linewidths=.5).set_title('Mutual Information')
    if save_mut_corr:
        tikzplotlib.save("LaTeX/MI.tex")
    plt.show()
    return mut
#Error analysis for the Bootstrap on linear regression
def error_analysis_bootstrap(X, Y):
    """Compare the spread of bootstrap .632 error estimates for linear regression.

    Runs (or loads, depending on the compute_bootstrap flag) n repetitions of
    a 10-split bootstrap, then plots the distribution of the 10-split mean
    against the distribution of single-split estimates.
    """
    n_splits = 10
    n = 1000
    error = np.zeros((n, n_splits))
    if compute_bootstrap:
        for i in range(n):
            # sqrt turns the scorer's squared-error output into an RMSE-scale error
            error[i,:] = np.sqrt(bootstrap_point632_score(LinearRegression(), X, Y, n_splits=n_splits))
        np.save('numpy/error_bootstrap.npy', error)
    else:
        error = np.load('numpy/error_bootstrap.npy')
    # spread of the averaged estimate vs spread of raw single-split estimates
    print(np.std(np.mean(error,1)))
    print(np.std(np.reshape(error,(n*n_splits,1))[:n]))
    bins=np.arange(42,47,0.1)
    fig, ax = plt.subplots(1,1)
    ax.hist(np.mean(error,1), bins=bins, facecolor='blue', alpha=0.7, label='10 splits')
    ax.hist(np.reshape(error,(n*n_splits,1))[:n], bins=bins, facecolor='red', alpha=0.7, label='1 split')
    ax.set_xticks(bins[:-1])
    plt.xticks(np.arange(42,47.4,0.5))
    plt.xlabel('Error [ug/m^3]')
    plt.ylabel('Samples')
    plt.legend()
    if save_bootstrap:
        tikzplotlib.save("LaTeX/error_analysis_bootstrap.tex")
    plt.show()
#PCA error for different numbers of principal components
def error_PCA(X, Y):
    """Plot the linear-regression bootstrap error against the number of
    PCA principal components (1..17).

    Recomputes when compute_PCA is set; otherwise loads the cached results.
    """
    n = 17
    PC = np.arange(1, n+1, 1)
    error = np.zeros((n,))
    if compute_PCA:
        for i in PC:
            # project onto the first i principal components, then estimate error
            X_PCA = PCA(n_components=i).fit_transform(X)
            error[i-1] = np.sqrt(np.mean(bootstrap_point632_score(LinearRegression(), X_PCA, Y, n_splits=50)))
        np.save('numpy/error_PCA_components.npy', error)
    else:
        error = np.load('numpy/error_PCA_components.npy')
    plt.scatter(PC, error)
    plt.xlabel('Number of components')
    plt.ylabel('Error [ug/m^3]')
    if save_PCA:
        tikzplotlib.save("LaTeX/error_PCA.tex")
    plt.show()
# Linear regression
def project_linear_regression(X, Y, method_name):
    """Fit an OLS model and report training RMSE and bootstrap .632 error.

    :param method_name: label used in the printed report lines.
    :returns: the bootstrap .632 error estimate.
    """
    print('Linear regression...')
    regressor = LinearRegression()
    regressor.fit(X, Y)
    training_rmse = rmse(Y, regressor.predict(X))
    print('Linear regression RMSE', method_name, ' :', training_rmse)
    # out-of-sample estimate via the bootstrap .632 scorer
    scores = bootstrap_point632_score(regressor, X, Y)
    boot_error = math.sqrt(np.mean(scores))
    print('Linear regression bootstrap 632 error', method_name, ' :', boot_error)
    return boot_error
# Lasso (no feature selection)
def project_lasso(X_full_pd, X_select, X_PCA, Y):
    """Plot Lasso bootstrap error over a log-spaced lambda grid for the
    full / selected / PCA feature sets, then plot the Lasso weights.

    :param X_full_pd: full feature set as a DataFrame (column names are
        needed later for the weight plot).
    """
    print('Lasso...')
    X_full = X_full_pd.values
    lambda_lasso = np.logspace(-2.0, 1.5, num=50)
    error_lasso_full = compute_error_lasso(X_full, Y, lambda_lasso, 'full')
    error_lasso_select = compute_error_lasso(X_select, Y, lambda_lasso, 'select')
    error_lasso_PCA = compute_error_lasso(X_PCA, Y, lambda_lasso, 'PCA')
    plt.xscale('log')
    plt.scatter(lambda_lasso, error_lasso_full, label='Full features')
    plt.scatter(lambda_lasso, error_lasso_select, label='Selected features')
    plt.scatter(lambda_lasso, error_lasso_PCA, label='PCA features')
    plt.xlabel('lambda')
    plt.ylabel('Error [ug/m^3]')
    plt.legend()
    if save_lasso:
        tikzplotlib.save("LaTeX/lasso.tex")
    plt.show()
    plot_weight_lasso(X_full_pd, Y)
def plot_weight_lasso(Xpd, Y):
    """Fit a Lasso (alpha=0.1) and plot its 10 smallest and 10 largest
    coefficients as a horizontal bar chart."""
    model = Lasso(alpha=0.1)
    model.fit(Xpd.values, Y)
    coef = pd.Series(model.coef_, index = Xpd.columns)
    # keep only the extremes of the sorted coefficient vector
    imp_coef = pd.concat([coef.sort_values().head(10), coef.sort_values().tail(10)])
    matplotlib.rcParams['figure.figsize'] = (8.0, 10.0)
    imp_coef.plot(kind = "barh")
    plt.xlabel('Weight')
    if save_lasso:
        tikzplotlib.save("LaTeX/lasso_weights.tex")
    plt.show()
def compute_error_lasso(X, Y, lambda_lasso, method_name):
    """Compute (or load cached) bootstrap .632 errors of a Lasso model for
    each value in *lambda_lasso*.

    :param method_name: tag used in prints and in the cache filename.
    :returns: array of errors, one per lambda value.
    """
    if compute_lasso:
        error_lasso = np.zeros(lambda_lasso.shape)
        for i in range(0, 50):
            model = Lasso(alpha=lambda_lasso[i])
            Y_pred = model.fit(X, Y).predict(X)
            RMSE = rmse(Y, Y_pred)
            # NOTE(review): the training RMSE stored here is immediately
            # overwritten by the bootstrap error below — it is only printed.
            error_lasso[i] = RMSE
            print(method_name, 'RMSE ( lambda =', lambda_lasso[i], ') :', RMSE)
            error_lasso[i] = math.sqrt(np.mean(bootstrap_point632_score(model, X, Y.ravel(), n_splits=10)))
            print(method_name, 'bootstrap 632 error ( lambda =', lambda_lasso[i], ') :', error_lasso[i])
        np.save('numpy/error_lasso_{}.npy'.format(method_name), error_lasso)
    else:
        error_lasso = np.load('numpy/error_lasso_{}.npy'.format(method_name))
    return error_lasso
# Ridge regression
def project_ridge_regression(X_full, X_select, X_PCA, Y):
    """Plot Ridge bootstrap error over a log-spaced alpha grid for the
    full / selected / PCA feature sets."""
    print('Ridge regression...')
    error_RR_full = compute_error_RR(X_full, Y, 'full')
    error_RR_select = compute_error_RR(X_select, Y, 'select')
    error_RR_PCA = compute_error_RR(X_PCA, Y, 'PCA')
    # same grid as the one rebuilt inside compute_error_RR
    alpha_RR = np.logspace(-2.0, 4.0, num=50)
    plt.xscale('log')
    plt.scatter(alpha_RR, error_RR_full, label='Full features')
    plt.scatter(alpha_RR, error_RR_select, label='Selected features')
    plt.scatter(alpha_RR, error_RR_PCA, label='PCA features')
    plt.xlabel('lambda')
    plt.ylabel('Error [ug/m^3]')
    plt.legend()
    if save_RR:
        tikzplotlib.save("LaTeX/RR.tex")
    plt.show()
def compute_error_RR(X, Y, method_name):
    """Compute (or load cached) bootstrap .632 errors of a Ridge model over
    a log-spaced alpha grid.

    :param method_name: tag used in prints and in the cache filename.
    :returns: array of 50 errors, one per alpha value.
    """
    alpha_RR = np.logspace(-2.0, 4.0, num=50)
    if compute_RR:
        error_RR = np.zeros(50)
        for i in range(0, 50):
            model = Ridge(alpha = alpha_RR[i])
            Y_pred = model.fit(X, Y).predict(X)
            # training RMSE is printed only; the stored value is the bootstrap error
            RMSE = rmse(Y, Y_pred)
            error_RR[i] = math.sqrt(np.mean(bootstrap_point632_score(model, X, Y, n_splits=100)))
            print(method_name, 'RMSE ( k =', alpha_RR[i], ') :', RMSE)
            print(method_name, 'bootstrap 632 error ( k =', alpha_RR[i], ') :', error_RR[i])
        np.save('numpy/error_RR_{}.npy'.format(method_name), error_RR)
    else:
        error_RR = np.load('numpy/error_RR_{}.npy'.format(method_name))
    return error_RR
# Tree regression
def project_tree_regression(X_full, X_select, X_PCA, Y, n):
    """Evaluate tree regression (max depth 1..n) on the three feature sets and plot."""
    print('Tree regression...')
    # One error curve per feature set, in plot/legend order.
    curves = [
        (compute_error_tree(X_full, Y, 'full', n), 'Full features'),
        (compute_error_tree(X_select, Y, 'select', n), 'Selected features'),
        (compute_error_tree(X_PCA, Y, 'PCA', n), 'PCA features'),
    ]
    depth = np.arange(1, n + 1, 1)
    for errors, curve_label in curves:
        plt.scatter(depth, errors, label=curve_label)
    plt.xlabel('Max depth')
    plt.ylabel('Error [ug/m^3]')
    plt.legend()
    if save_tree:
        tikzplotlib.save("LaTeX/tree.tex")
    plt.show()
def compute_error_tree(X, Y, method_name, n):
depth = np.arange(1, n + 1, 1)
if compute_tree:
error_tree = np.zeros(n)
for i in | |
self).__init__(**kwargs)
self.scheme = scheme
self.service_name = service_name
class ServiceLoadMetric(msrest.serialization.Model):
    """A metric used to load balance a service during runtime.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the metric. When the service reports load at runtime,
     the reported metric name must match this value exactly; metric names are case
     sensitive.
    :type name: str
    :param weight: Relative weight of this load metric, compared to the other metrics
     configured for the service, as a number. Possible values include: "Zero", "Low",
     "Medium", "High".
    :type weight: str or
     ~service_fabric_managed_clusters_management_client.models.ServiceLoadMetricWeight
    :param primary_default_load: Used only for Stateful services. Default amount of load,
     as a number, that this service creates for this metric when it is a Primary replica.
    :type primary_default_load: int
    :param secondary_default_load: Used only for Stateful services. Default amount of
     load, as a number, that this service creates for this metric when it is a Secondary
     replica.
    :type secondary_default_load: int
    :param default_load: Used only for Stateless services. Default amount of load, as a
     number, that this service creates for this metric.
    :type default_load: int
    """

    # msrest validation rules: 'name' must be supplied.
    _validation = {'name': {'required': True}}

    # Maps python attribute names to their wire-format keys and types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'weight': {'key': 'weight', 'type': 'str'},
        'primary_default_load': {'key': 'primaryDefaultLoad', 'type': 'int'},
        'secondary_default_load': {'key': 'secondaryDefaultLoad', 'type': 'int'},
        'default_load': {'key': 'defaultLoad', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        name: str,
        weight: Optional[Union[str, "ServiceLoadMetricWeight"]] = None,
        primary_default_load: Optional[int] = None,
        secondary_default_load: Optional[int] = None,
        default_load: Optional[int] = None,
        **kwargs
    ):
        super(ServiceLoadMetric, self).__init__(**kwargs)
        self.name = name
        self.weight = weight
        self.primary_default_load = primary_default_load
        self.secondary_default_load = secondary_default_load
        self.default_load = default_load
class ServicePlacementPolicy(msrest.serialization.Model):
    """Base type describing the policy used for placement of a Service Fabric service.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ServicePlacementInvalidDomainPolicy,
    ServicePlacementNonPartiallyPlaceServicePolicy,
    ServicePlacementPreferPrimaryDomainPolicy, ServicePlacementRequiredDomainPolicy,
    ServicePlacementRequireDomainDistributionPolicy.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of placement policy for a service fabric service.
     Following are the possible values. Constant filled by server. Possible values
     include: "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain",
     "RequiredDomainDistribution", "NonPartiallyPlaceService".
    :type type: str or
     ~service_fabric_managed_clusters_management_client.models.ServicePlacementPolicyType
    """

    _validation = {'type': {'required': True}}

    _attribute_map = {'type': {'key': 'type', 'type': 'str'}}

    # Discriminator map: the wire value of 'type' selects the concrete model
    # class during deserialization.
    _subtype_map = {
        'type': {
            'InvalidDomain': 'ServicePlacementInvalidDomainPolicy',
            'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicy',
            'PreferredPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicy',
            'RequiredDomain': 'ServicePlacementRequiredDomainPolicy',
            'RequiredDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicy',
        }
    }

    def __init__(self, **kwargs):
        super(ServicePlacementPolicy, self).__init__(**kwargs)
        # Filled in by the concrete sub-class (or by the server on responses).
        self.type = None  # type: Optional[str]
class ServicePlacementInvalidDomainPolicy(ServicePlacementPolicy):
    """Placement policy for a Service Fabric service where a particular fault or upgrade
    domain should not be used for placement of the instances or replicas of that service.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of placement policy for a service fabric service.
     Following are the possible values. Constant filled by server. Possible values
     include: "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain",
     "RequiredDomainDistribution", "NonPartiallyPlaceService".
    :type type: str or
     ~service_fabric_managed_clusters_management_client.models.ServicePlacementPolicyType
    :param domain_name: Required. Name of the domain that should not be used for
     placement.
    :type domain_name: str
    """

    _validation = {
        'type': {'required': True},
        'domain_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'domain_name': {'key': 'domainName', 'type': 'str'},
    }

    def __init__(self, *, domain_name: str, **kwargs):
        super(ServicePlacementInvalidDomainPolicy, self).__init__(**kwargs)
        # Fixed discriminator value for this sub-class.
        self.type = 'InvalidDomain'  # type: str
        self.domain_name = domain_name
class ServicePlacementNonPartiallyPlaceServicePolicy(ServicePlacementPolicy):
    """Placement policy carrying the ``NonPartiallyPlaceService`` discriminator.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of placement policy for a service fabric service.
     Following are the possible values. Constant filled by server. Possible values
     include: "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain",
     "RequiredDomainDistribution", "NonPartiallyPlaceService".
    :type type: str or
     ~service_fabric_managed_clusters_management_client.models.ServicePlacementPolicyType
    """

    _validation = {'type': {'required': True}}

    _attribute_map = {'type': {'key': 'type', 'type': 'str'}}

    def __init__(self, **kwargs):
        super(ServicePlacementNonPartiallyPlaceServicePolicy, self).__init__(**kwargs)
        # Fixed discriminator value for this sub-class.
        self.type = 'NonPartiallyPlaceService'  # type: str
class ServicePlacementPreferPrimaryDomainPolicy(ServicePlacementPolicy):
    """Placement policy where the service's Primary replicas should optimally be placed
    in a particular domain.

    Usually used with fault domains when the Service Fabric cluster is geographically
    distributed, to indicate that a service's primary replica should be located in a
    particular fault domain — which in geo-distributed scenarios usually aligns with
    regional or datacenter boundaries. Since this is an optimization, the Primary
    replica may still end up outside this domain due to failures, capacity limits, or
    other constraints.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of placement policy for a service fabric service.
     Following are the possible values. Constant filled by server. Possible values
     include: "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain",
     "RequiredDomainDistribution", "NonPartiallyPlaceService".
    :type type: str or
     ~service_fabric_managed_clusters_management_client.models.ServicePlacementPolicyType
    :param domain_name: Required. Name of the domain that should be used for placement
     as per this policy.
    :type domain_name: str
    """

    _validation = {
        'type': {'required': True},
        'domain_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'domain_name': {'key': 'domainName', 'type': 'str'},
    }

    def __init__(self, *, domain_name: str, **kwargs):
        super(ServicePlacementPreferPrimaryDomainPolicy, self).__init__(**kwargs)
        # Fixed discriminator value for this sub-class.
        self.type = 'PreferredPrimaryDomain'  # type: str
        self.domain_name = domain_name
class ServicePlacementRequiredDomainPolicy(ServicePlacementPolicy):
    """Placement policy where the instances or replicas of the service must be placed in
    a particular domain.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of placement policy for a service fabric service.
     Following are the possible values. Constant filled by server. Possible values
     include: "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain",
     "RequiredDomainDistribution", "NonPartiallyPlaceService".
    :type type: str or
     ~service_fabric_managed_clusters_management_client.models.ServicePlacementPolicyType
    :param domain_name: Required. Name of the domain that should be used for placement
     as per this policy.
    :type domain_name: str
    """

    _validation = {
        'type': {'required': True},
        'domain_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'domain_name': {'key': 'domainName', 'type': 'str'},
    }

    def __init__(self, *, domain_name: str, **kwargs):
        super(ServicePlacementRequiredDomainPolicy, self).__init__(**kwargs)
        # Fixed discriminator value for this sub-class.
        self.type = 'RequiredDomain'  # type: str
        self.domain_name = domain_name
class ServicePlacementRequireDomainDistributionPolicy(ServicePlacementPolicy):
    """Placement policy where two replicas from the same partition should never be
    placed in the same fault or upgrade domain.

    While not common, omitting this can expose the service to an increased risk of
    concurrent failures due to unplanned outages or other cases of subsequent/concurrent
    failures. For example, with replicas deployed across different datacenters, one
    replica per location: if a datacenter goes offline, the replica placed there is
    normally packed into one of the remaining datacenters. If that behavior is not
    desirable, set this policy.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of placement policy for a service fabric service.
     Following are the possible values. Constant filled by server. Possible values
     include: "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain",
     "RequiredDomainDistribution", "NonPartiallyPlaceService".
    :type type: str or
     ~service_fabric_managed_clusters_management_client.models.ServicePlacementPolicyType
    :param domain_name: Required. Name of the domain that should be used for placement
     as per this policy.
    :type domain_name: str
    """

    _validation = {
        'type': {'required': True},
        'domain_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'domain_name': {'key': 'domainName', 'type': 'str'},
    }

    def __init__(self, *, domain_name: str, **kwargs):
        super(ServicePlacementRequireDomainDistributionPolicy, self).__init__(**kwargs)
        # Fixed discriminator value for this sub-class.
        self.type = 'RequiredDomainDistribution'  # type: str
        self.domain_name = domain_name
class ServiceResource(ProxyResource):
"""The service resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Resource location depends on the parent resource.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~service_fabric_managed_clusters_management_client.models.SystemData
:param properties: The service resource properties.
:type properties:
~service_fabric_managed_clusters_management_client.models.ServiceResourceProperties
"""
_validation = {
'id': {'readonly': | |
assert sess.query(Blub).get(f.id) is None
assert sess.query(Blub).get(b.id) is None
assert sess.query(Bar).get(f.id) is None
self.assert_sql_count(testing.db, go, 0)
else:
# this is testing the 'wrong' behavior of using get()
# polymorphically with mappers that are not configured to be
# polymorphic. the important part being that get() always
# returns an instance of the query's type.
def go():
assert sess.query(Foo).get(f.id) is f
bb = sess.query(Foo).get(b.id)
assert isinstance(b, Foo) and bb.id == b.id
bll = sess.query(Foo).get(bl.id)
assert isinstance(bll, Foo) and bll.id == bl.id
assert sess.query(Bar).get(b.id) is b
bll = sess.query(Bar).get(bl.id)
assert isinstance(bll, Bar) and bll.id == bl.id
assert sess.query(Blub).get(bl.id) is bl
self.assert_sql_count(testing.db, go, 3)
class EagerLazyTest(fixtures.MappedTest):
    """Exercise eager vs. lazy loading of child items on inheriting mappers and
    verify that LazyLoader builds the correct query condition."""

    @classmethod
    def define_tables(cls, metadata):
        # Tables are published as module globals because test_basic inserts
        # into them directly.
        global foo, bar, bar_foo
        foo = Table(
            "foo",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
        )
        bar = Table(
            "bar",
            metadata,
            Column("id", Integer, ForeignKey("foo.id"), primary_key=True),
            Column("bar_data", String(30)),
        )
        bar_foo = Table(
            "bar_foo",
            metadata,
            Column("bar_id", Integer, ForeignKey("bar.id")),
            Column("foo_id", Integer, ForeignKey("foo.id")),
        )

    def test_basic(self):
        class Foo(object):
            pass

        class Bar(Foo):
            pass

        foo_mapper = mapper(Foo, foo)
        bar_mapper = mapper(Bar, bar, inherits=foo_mapper)
        # Same association table mapped twice: once lazily, once eagerly.
        bar_mapper.add_property(
            "lazy", relationship(foo_mapper, bar_foo, lazy="select")
        )
        bar_mapper.add_property(
            "eager", relationship(foo_mapper, bar_foo, lazy="joined")
        )

        foo.insert().execute(data="foo1")
        bar.insert().execute(id=1, data="bar1")
        foo.insert().execute(data="foo2")
        bar.insert().execute(id=2, data="bar2")
        foo.insert().execute(data="foo3")  # 3
        foo.insert().execute(data="foo4")  # 4
        bar_foo.insert().execute(bar_id=1, foo_id=3)
        bar_foo.insert().execute(bar_id=2, foo_id=4)

        sess = create_session()
        bar_query = sess.query(Bar)
        # Each Bar is linked to exactly one Foo through bar_foo, regardless
        # of the loading strategy.
        self.assert_(len(bar_query.first().lazy) == 1)
        self.assert_(len(bar_query.first().eager) == 1)
class EagerTargetingTest(fixtures.MappedTest):
    """Test a scenario where a joined-table inheritance join might be confused
    with an eagerly loaded joined table."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("name", String(50)),
            Column("type", String(30), nullable=False),
            Column("parent_id", Integer, ForeignKey("a_table.id")),
        )
        Table(
            "b_table",
            metadata,
            Column("id", Integer, ForeignKey("a_table.id"), primary_key=True),
            Column("b_data", String(50)),
        )

    def test_adapt_stringency(self):
        b_table, a_table = self.tables.b_table, self.tables.a_table

        class A(fixtures.ComparableEntity):
            pass

        class B(A):
            pass

        mapper(
            A,
            a_table,
            polymorphic_on=a_table.c.type,
            polymorphic_identity="A",
            properties={"children": relationship(A, order_by=a_table.c.name)},
        )
        mapper(
            B,
            b_table,
            inherits=A,
            polymorphic_identity="B",
            properties={
                "b_derived": column_property(b_table.c.b_data + "DATA")
            },
        )
        sess = create_session()

        parent = B(id=1, name="b1", b_data="i")
        sess.add(parent)
        sess.flush()

        child = B(id=2, name="b2", b_data="l", parent_id=1)
        sess.add(child)
        sess.flush()

        bid = parent.id
        sess.expunge_all()

        # plain load
        loaded = sess.query(B).filter(B.id == bid).all()[0]
        eq_(loaded, B(id=1, name="b1", b_data="i"))
        eq_(loaded.children[0], B(id=2, name="b2", b_data="l"))

        sess.expunge_all()
        # same assertions with the children collection eagerly joined in
        loaded = (
            sess.query(B)
            .options(joinedload(B.children))
            .filter(B.id == bid)
            .all()[0]
        )
        eq_(loaded, B(id=1, name="b1", b_data="i"))
        eq_(loaded.children[0], B(id=2, name="b2", b_data="l"))
class FlushTest(fixtures.MappedTest):
    """test dependency sorting among inheriting mappers"""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("email", String(128)),
            Column("password", String(16)),
        )
        Table(
            "roles",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("description", String(32)),
        )
        Table(
            "user_roles",
            metadata,
            Column(
                "user_id", Integer, ForeignKey("users.id"), primary_key=True
            ),
            Column(
                "role_id", Integer, ForeignKey("roles.id"), primary_key=True
            ),
        )
        Table(
            "admins",
            metadata,
            Column(
                "admin_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("user_id", Integer, ForeignKey("users.id")),
        )

    def test_one(self):
        admins, users, roles, user_roles = (
            self.tables.admins,
            self.tables.users,
            self.tables.roles,
            self.tables.user_roles,
        )

        class User(object):
            pass

        class Role(object):
            pass

        class Admin(User):
            pass

        mapper(Role, roles)
        user_mapper = mapper(
            User,
            users,
            properties={
                "roles": relationship(
                    Role, secondary=user_roles, lazy="joined"
                )
            },
        )
        mapper(Admin, admins, inherits=user_mapper)
        sess = create_session()
        adminrole = Role()
        sess.add(adminrole)
        sess.flush()

        # create an Admin, and append a Role. the dependency processors
        # corresponding to the "roles" attribute for the Admin mapper and the
        # User mapper have to ensure that two dependency processors don't fire
        # off and insert the many to many row twice.
        a = Admin()
        a.roles.append(adminrole)
        a.password = "<PASSWORD>"
        sess.add(a)
        sess.flush()

        eq_(select([func.count("*")]).select_from(user_roles).scalar(), 1)

    def test_two(self):
        admins, users, roles, user_roles = (
            self.tables.admins,
            self.tables.users,
            self.tables.roles,
            self.tables.user_roles,
        )

        class User(object):
            # BUG FIX: the default for ``password`` was a bare ``<PASSWORD>``
            # placeholder left over from credential scrubbing — a syntax
            # error. ``None`` restores a valid, neutral default.
            def __init__(self, email=None, password=None):
                self.email = email
                self.password = password

        class Role(object):
            def __init__(self, description=None):
                self.description = description

        class Admin(User):
            pass

        mapper(Role, roles)
        user_mapper = mapper(
            User,
            users,
            properties={
                "roles": relationship(
                    Role, secondary=user_roles, lazy="joined"
                )
            },
        )
        mapper(Admin, admins, inherits=user_mapper)

        # create roles
        adminrole = Role("admin")
        sess = create_session()
        sess.add(adminrole)
        sess.flush()

        # create admin user
        a = Admin(email="tim", password="<PASSWORD>")
        a.roles.append(adminrole)
        sess.add(a)
        sess.flush()

        a.password = "<PASSWORD>"
        sess.flush()

        eq_(select([func.count("*")]).select_from(user_roles).scalar(), 1)
class PassiveDeletesTest(fixtures.MappedTest):
    """Test passive_deletes behavior across a three-level joined-table
    inheritance hierarchy (A <- B <- C), asserting exactly which DELETE (and
    SELECT) statements are emitted depending on which mapper in the chain has
    passive_deletes enabled."""

    __requires__ = ("foreign_keys",)

    @classmethod
    def define_tables(cls, metadata):
        # Child tables cascade deletes from the parent at the DB level.
        Table(
            "a",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("type", String(30)),
        )
        Table(
            "b",
            metadata,
            Column(
                "id",
                Integer,
                ForeignKey("a.id", ondelete="CASCADE"),
                primary_key=True,
            ),
            Column("data", String(10)),
        )
        Table(
            "c",
            metadata,
            Column("cid", Integer, primary_key=True),
            Column("bid", ForeignKey("b.id", ondelete="CASCADE")),
        )

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

        class B(A):
            pass

        class C(B):
            pass

    def _fixture(self, a_p=False, b_p=False, c_p=False):
        # Map the hierarchy with passive_deletes toggled per level by the
        # a_p/b_p/c_p flags.
        A, B, C = self.classes("A", "B", "C")
        a, b, c = self.tables("a", "b", "c")
        mapper(
            A,
            a,
            passive_deletes=a_p,
            polymorphic_on=a.c.type,
            polymorphic_identity="a",
        )
        mapper(B, b, inherits=A, passive_deletes=b_p, polymorphic_identity="b")
        mapper(C, c, inherits=B, passive_deletes=c_p, polymorphic_identity="c")

    def test_none(self):
        # No passive_deletes anywhere: the ORM deletes each table's row itself.
        A, B, C = self.classes("A", "B", "C")
        self._fixture()
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()

        # want to see if the 'C' table loads even though
        # a and b are loaded
        c1 = s.query(B).filter_by(id=3).first()
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            RegexSQL(
                "SELECT .* " "FROM c WHERE :param_1 = c.bid", [{"param_1": 3}]
            ),
            CompiledSQL("DELETE FROM c WHERE c.cid = :cid", [{"cid": 1}]),
            CompiledSQL("DELETE FROM b WHERE b.id = :id", [{"id": 3}]),
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 3}]),
        )

    def test_c_only(self):
        # passive_deletes only on C: no SELECT against c, and no DELETE for
        # the c row — the FK cascade handles it.
        A, B, C = self.classes("A", "B", "C")
        self._fixture(c_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()

        s.delete(a1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "SELECT a.id AS a_id, a.type AS a_type "
                "FROM a WHERE a.id = :param_1",
                [{"param_1": 1}],
            ),
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 1}]),
        )

        b1.id
        s.delete(b1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL("DELETE FROM b WHERE b.id = :id", [{"id": 2}]),
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 2}]),
        )

        # want to see if the 'C' table loads even though
        # a and b are loaded
        c1 = s.query(A).filter_by(id=3).first()
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL("DELETE FROM b WHERE b.id = :id", [{"id": 3}]),
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 3}]),
        )

    def test_b_only(self):
        # passive_deletes on B propagates down: B and C deletes emit only
        # the DELETE for the base 'a' row.
        A, B, C = self.classes("A", "B", "C")
        self._fixture(b_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()

        s.delete(a1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "SELECT a.id AS a_id, a.type AS a_type "
                "FROM a WHERE a.id = :param_1",
                [{"param_1": 1}],
            ),
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 1}]),
        )

        b1.id
        s.delete(b1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 2}])
        )

        c1.id
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 3}])
        )

    def test_a_only(self):
        # passive_deletes on the base A covers the whole hierarchy.
        A, B, C = self.classes("A", "B", "C")
        self._fixture(a_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()

        s.delete(a1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "SELECT a.id AS a_id, a.type AS a_type "
                "FROM a WHERE a.id = :param_1",
                [{"param_1": 1}],
            ),
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 1}]),
        )

        b1.id
        s.delete(b1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 2}])
        )

        # want to see if the 'C' table loads even though
        # a and b are loaded
        c1 = s.query(A).filter_by(id=3).first()
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL("DELETE FROM a WHERE a.id = :id", [{"id": 3}])
        )
class OptimizedGetOnDeferredTest(fixtures.MappedTest):
    """test that the 'optimized get' path accommodates deferred columns."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )
        Table(
            "b",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
            Column("data", String(10)),
        )

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

        class B(A):
            pass

    @classmethod
    def setup_mappers(cls):
        A, B = cls.classes("A", "B")
        a, b = cls.tables("a", "b")
        mapper(A, a)
        # Both the plain column and the derived expression are deferred, so
        # the optimized-get path has to fetch them on access.
        mapper(
            B,
            b,
            inherits=A,
            properties={
                "data": deferred(b.c.data),
                "expr": column_property(b.c.data + "q", deferred=True),
            },
        )

    def test_column_property(self):
        A, B = self.classes("A", "B")
        sess = Session()
        obj = B(data="x")
        sess.add(obj)
        sess.flush()
        # deferred expression column loads on first access
        eq_(obj.expr, "xq")

    def test_expired_column(self):
        A, B = self.classes("A", "B")
        sess = Session()
        obj = B(data="x")
        sess.add(obj)
        sess.flush()
        sess.expire(obj, ["data"])
        # expired deferred column reloads on first access
        eq_(obj.data, "x")
class JoinedNoFKSortingTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
Table("b", metadata, Column("id", Integer, primary_key=True))
Table("c", metadata, Column("id", Integer, primary_key=True))
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
class C(A):
pass
@classmethod
def setup_mappers(cls):
A, | |
as read only.
:type removeunique: boolean
:parm specify_props: Optionally set list of properties to be removed instead of the default.
:type specify_props: list
"""
try:
type_str = self.typepath.defs.typestring
currtype = currdict.get(type_str, None)
oridict = copy.deepcopy(currdict)
if specify_props:
templist = specify_props
else:
templist = ["Modified", "Type", "Description", "Status",\
"links", "SettingsResult", "Attributes", \
"@odata.context", "@odata.type", "@odata.id",\
"@odata.etag", "Links", "Actions", \
"AvailableActions", "BiosVersion"]
#Attributes removed and readded later as a validation workaround
currdict = iterateandclear(currdict, templist)
iloversion = self.getiloversion()
if not iloversion:
return currdict
self.validationmanager.validatedict(currdict, currtype=currtype, \
monolith=self.monolith, unique=removeunique, searchtype=None)
if oridict.get("Attributes", None):
currdict["Attributes"] = oridict["Attributes"]
return currdict
except:
if emptyraise is True:
raise EmptyRaiseForEAFP()
elif emptyraise == 'pass':
pass
else:
raise
def getidbytype(self, tpe):
    """Return a list of URIs that correspond to the supplied type string.

    :param tpe: type string to search for.
    :type tpe: string.
    :returns: list of matching URIs (empty when the type is unknown).
    """
    # First monolith type name matching `tpe`, or None when not present.
    typename = next(self.monolith.gettypename(tpe), None)
    if typename:
        return list(self.monolith.typesadded[typename])
    return []
def getcollectionmembers(self, path, fullresp=False):
    """Returns collection/item lists of the provided path.

    :param path: path to return.
    :type path: string.
    :param fullresp: Return full json data instead of only members.
    :type fullresp: bool.
    :returns: list of collection members
    """
    # Gen10 HPE systems support $expand to inline the collection members
    # in a single request.
    if self.typepath.defs.isgen10 and self.typepath.gencompany and '?$expand=.' not in path:
        path += '?$expand=.' if path.endswith('/') else '/?$expand=.'
    members = self.get_handler(path, service=True, silent=True)
    if fullresp:
        return [members.dict]
    if members:
        member_key = 'Members' if self.typepath.defs.isgen10 else 'Items'
        try:
            members = members.dict[member_key]
        except KeyError:
            members = []
    return members
def getbiosfamilyandversion(self):
    """Return the current BIOS (family, version) tuple, or (None, None)."""
    self._updatemono(currtype="ComputerSystem.", crawl=False)
    try:
        for inst in self.monolith.iter("ComputerSystem."):
            data = inst.resp.obj
            # Some responses nest the version under Bios/Current; otherwise
            # fall back to the flat BiosVersion property.
            if "Current" in data["Bios"]:
                version_string = data["Bios"]["Current"]["VersionString"]
            else:
                version_string = data["BiosVersion"]
            family, revision = version_string.split(" ")[:2]
            return (family, revision[1:])
    except Exception:
        # Best effort: any missing key or unexpected shape simply means no
        # BIOS information is available.
        pass
    return (None, None)
def getiloversion(self, skipschemas=False):
    """Function that returns the current iLO version.

    :param skipschemas: flag to determine whether to skip schema download. If False, this
        will also verify if schemas are available.
    :type skipschemas: bool
    :returns: returns current iLO version, or None when the version is below
        4.210 or schema verification did not succeed (see final return).
    """
    # Prefer the cached value; otherwise fall back to the version the type
    # path detected.
    iloversion = self._iloversion = self._iloversion if self._iloversion \
        else self.typepath.iloversion
    if self.typepath.gencompany and not self._iloversion and not self.typepath.noschemas:
        # HPE system without a cached version: read it from the Manager
        # resource's firmware properties.
        self.monolith.load(self.typepath.defs.managerpath, crawl=False)
        results = next(iter(self.getprops('Manager.', ['FirmwareVersion', 'Firmware'])))

        def quickdrill(_dict, key):
            """ function to find key in nested dictionary """
            return _dict[key]

        # Drill through nested dicts until the raw version string is reached.
        while isinstance(results, dict):
            results = quickdrill(results, next(iter(results.keys())))
        iloversionlist = results.replace('v', '').replace('.', '').split(' ')
        iloversion = float('.'.join(iloversionlist[1:3]))

        model = self.getprops('Manager.', ['Model'])
        if model:
            if next(iter(model))['Model'] == "iLO CM":
                # Assume iLO 4 types in Moonshot
                iloversion = None
        self._iloversion = iloversion
    elif not self.typepath.gencompany:  # Assume schemas are available somewhere in non-hpe redfish
        self._iloversion = iloversion = 4.210

    # When schema checks are skipped, treat them as confirmed available.
    conf = None if not skipschemas else True
    if not skipschemas:
        if iloversion and iloversion >= 4.210:
            conf = self._verifyschemasdownloaded(self.monolith)
        elif iloversion and iloversion < 4.210:
            warning_handler("Please upgrade to iLO 4 version 2.1 or above for schema support.")
        else:
            warning_handler("Schema support unavailable on the currently logged in system.")
    return iloversion if iloversion and iloversion >= 4.210 and conf else None
def get_selection(self, selector=None, setenable=False, path_refresh=False):
    """Gathers instances and optionally the attributeregistry based on selector.

    :param selector: The type selection for the get operation.
    :type selector: str.
    :param setenable: Flag to determine if registry should also be returned.
    :type setenable: boolean.
    :param path_refresh: Flag to reload the selected instances.
    :type path_refresh: boolean.
    :returns: the selected instances; when ``setenable`` is True, a tuple of
        (instances, attribute registry).
    """
    selected = self._getinstances(selector=selector, path_refresh=path_refresh)
    if not setenable:
        return selected
    # Capture the registry before dropping non-settings instances.
    registry = getattributeregistry(instances=selected)
    return skipnonsettingsinst(instances=selected), registry
def create_save_header(self):
    """Adds save file headers to show what server the data came from.

    Builds a ``Comments`` block from the ComputerSystem, BIOS and Manager
    resources currently in the monolith (manufacturer, model, BIOS family and
    date, serial number, iLO firmware version).

    :returns: returns an header ordered dictionary
    """
    instances = OrderedDict()
    monolith = self.monolith
    # Make sure the resource types read below are present in the monolith.
    self._updatemono(currtype="ComputerSystem.", crawl=False)
    self._updatemono(currtype=self.typepath.defs.biostype, crawl=False)
    self._updatemono(currtype="Manager.", crawl=False)
    instances["Comments"] = OrderedDict()
    try:
        for instance in monolith.iter("ComputerSystem."):
            if instance.resp.obj["Manufacturer"]:
                instances["Comments"]["Manufacturer"] = \
                    instance.resp.obj["Manufacturer"]
            if instance.resp.obj["Model"]:
                instances["Comments"]["Model"] = instance.resp.obj["Model"]
            if instance.resp.obj["Oem"][self.typepath.defs.oemhp]["Bios"]["Current"]:
                oemjson = instance.resp.obj["Oem"][self.typepath.defs.oemhp]["Bios"]["Current"]
                instances["Comments"]["BIOSFamily"] = oemjson["Family"]
                instances["Comments"]["BIOSDate"] = oemjson["Date"]
        for instance in monolith.iter(self.typepath.defs.biostype):
            # Prefer the serial number from the BIOS attributes when present.
            if "Attributes" in list(instance.resp.obj.keys()) and \
                    instance.resp.obj["Attributes"]["SerialNumber"]:
                instances["Comments"]["SerialNumber"] = \
                    instance.resp.obj["Attributes"]["SerialNumber"]
            elif instance.resp.obj["SerialNumber"]:
                instances["Comments"]["SerialNumber"] = instance.resp.obj["SerialNumber"]
        for instance in monolith.iter("Manager."):
            if instance.resp.obj["FirmwareVersion"]:
                instances["Comments"]["iLOVersion"] = instance.resp.obj["FirmwareVersion"]
    except Exception:
        # Best effort: a missing key aborts header collection and whatever
        # comments were gathered so far are returned.
        pass
    return instances
def download_path(self, paths, crawl=True, path_refresh=False):
    """Loads paths into the monolith.

    :param paths: list of paths to download
    :type paths: list
    :param crawl: Flag to determine if load should traverse found links.
    :type crawl: boolean.
    :param path_refresh: Flag to reload the paths or not.
    :type path_refresh: bool.
    :raises SessionExpired: when the connection was aborted (errno 10053).
    """
    if not paths:
        return
    try:
        for pth in paths:
            self.monolith.load(path=pth, init=False, path_refresh=path_refresh,
                               crawl=crawl, includelogs=True)
    except Exception as excp:
        # errno 10053 (connection aborted) means the session is gone.
        # BUG FIX: the previous implementation raised SessionExpired inside
        # an inner try whose bare ``except:`` caught it and re-raised the
        # original exception instead, so SessionExpired could never
        # propagate. getattr also handles exceptions without an errno.
        if getattr(excp, "errno", None) == 10053:
            raise SessionExpired()
        raise
def get_model(self, currdict, attributeregistry, latestschema=None, newarg=None, proppath=None):
    """Return the schema model (and bios model, if applicable) for an instance.

    The returned model(s) can be used to read schema data and validate
    patches.

    :param currdict: The dictionary to gather the schema model from.
    :type currdict: dict
    :param attributeregistry: The current system's attribute registry; pass
        None when no bios registry is being gathered.
    :type attributeregistry: dict
    :param latestschema: Flag to determine if the schema version should be
        dropped when matching schema information.
    :type latestschema: bool
    :param newarg: List of multi level properties to be gathered.
    :type newarg: list
    :param proppath: The path of the schema you want to validate (from
        Location header).
    :type proppath: str
    :returns: tuple of (model, bios model); either may be None
    """
    typekey = self.typepath.defs.typestring
    validator = self.validationmanager

    model = validator.get_registry_model(
        currtype=currdict[typekey], newarg=newarg,
        latestschema=latestschema, proppath=proppath)

    # No attribute registry requested: only the plain model is relevant.
    if not attributeregistry:
        if model:
            return model, None
        LOGGER.warning("Unable to locate registry/schema for %s", currdict[typekey])
        return None, None

    # Prefer the registry named on the instance itself, falling back to the
    # first entry of the supplied attribute registry.
    attrval = currdict.get("AttributeRegistry", None)
    if not attrval:
        attrval = list(attributeregistry.values())[0]
    bsmodel = validator.get_registry_model(
        currtype=attrval if attrval else currdict[typekey],
        newarg=newarg, latestschema=latestschema,
        searchtype=self.typepath.defs.attributeregtype)
    return model, bsmodel
def _build_monolith(self, path=None, includelogs=False, skipbuild=False):
    """Run through the RIS tree to build the monolith.

    :param path: path to initiate login to.
    :type path: str.
    :param includelogs: flag to determine if logs should be downloaded.
    :type includelogs: boolean.
    :param skipbuild: if true, skip the build of the monolith (initialize
        it with only the root response).
    :type skipbuild: bool
    """
    self.monolith = RisMonolith(self.current_client, self.typepath)
    if skipbuild:
        # Seed the monolith with just the root resource; no crawling.
        self.monolith.update_member(
            resp=self.current_client.root,
            path=self.current_client.default_prefix,
            init=False)
    else:
        self.monolith.load(path=path, includelogs=includelogs, init=True)
def _modifiedpath(self, results, delete=False, replace=False):
"""Check the path and set the modified flag
:param delete: Flag to delete the path in the results
:type delete: bool
:param replace: Flag to replace the path from the results
:type replace: bool
:param results: Response for the path
:type results: RestResponse
"""
if not results or not results.status in (200, 201):
return
path = results.path
path = path.split('/Actions')[0] if 'Actions' in path else path
path = path + '/' if self.typepath.defs.isgen10 and path[-1] != '/' else path
if not replace and path in self.monolith.paths:
self.monolith.paths[path].modified = True
_ = self.monolith.markmodified(path)
if delete and path in self.monolith.paths:
self.monolith.removepath(path)
if replace and path in self.monolith.paths:
self.monolith.paths[path].modified = True
self.monolith.paths[path].patches = []
def _checkforchange(self, paths, crawl=True):
"""Check if the given paths have been modified and updates monolith if it has
:param paths: paths to be checked
:type paths: list
"""
(pathtoetag, _) = self._gettypeswithetag()
mono = self.monolith
self.download_path(list(paths), crawl=crawl, path_refresh=True)
etags = [None if not path in mono.paths else mono.paths[path].etag for path in paths]
sametag = [path for ind, path in enumerate(paths) if path in pathtoetag\
and path in self.monolith.paths and pathtoetag[path] != etags[ind]]
for path in sametag:
self.monolith.paths[path].patches = []
if sametag:
LOGGER.warning('The data in the following paths have been updated. '\
'Recheck the changes made to made. %s', ','.join([str(path) for \
path in sametag]))
def _updatemono(self, currtype=None, path=None, crawl=False, path_refresh=False):
"""Check if type/path exists in current monolith
:param entrytype: the found entry type.
:type entrytype: str.
:param currtype: the current entry type.
:type currtype: str.
:param crawl: flag to determine if | |
#!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 7/16/14
###Function:
##### Visualize results of time-based epidemic simulations for immunity_single sims when aligned by epidemic time, which is defined as aligning tsteps at which simulation attained 5% of cumulative infections during the epidemic.
###Import data:
###Command Line: python age_time_immunity_single_epitime_viz.py
##############################################
### notes ###
# Ages:
# 1 = Infant, 2 = Toddler, 3 = Child, 4 = Adult, 5 = Senior, 6 = Elder (in nursing home)
# Places (edge attribute):
# F = household/family, S = school, H = hospital, M = shopping mall, W = workplace, D = daycare, E = elsewhere, P = preschool, O = nursing homes, N = neighbor
# T_critical = 0.0565868
### packages/modules ###
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
import zipfile
from time import clock
import bisect
import pylab as P
## local modules ##
import percolations as perc
import simulation_parameters as par
import pretty_print as pp
### data processing parameters ###
# proportion of cumulative infections at which epidemics are aligned in
# "epidemic time" (passed to perc.define_epi_time below)
align_prop = par.dp_alignprop
### plotting parameters ###
numsims = par.pp_numsims        # number of simulations per parameter set (used in filenames)
size_epi = par.pp_size_epi      # minimum final size for a run to count as an epidemic
inf_period = par.pp_inf_period  # presumably the infectious period -- only printed here
g = par.pp_gamma
T = par.pp_T
b = par.pp_b                    # beta; interpolated into result/figure filenames
# mean retro OR params: cumulative-infection percent range over which the
# retrospective odds ratio is averaged (passed to perc.mean_retro_OR)
beg_perc, end_perc = par.pp_beg_retro_perc, par.pp_end_retro_perc
# specific to immunity params
imm_val_ls = par.pp_immune_val_list  # immunity values processed in the loop below
prop = par.pp_prop
zstring = par.pp_pstr_fixed   # filename fragment for the fixed parameter
zstring2 = par.pp_mstr_range  # filename fragment for the varied parameter range
print "Params:", numsims, size_epi, inf_period, g, T, b, imm_val_ls, prop
### data structures ###
d_node_age = {} # d_node_age[nodenumber] = ageclass
### ziparchive to read and write results ###
zipname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Results/immunity_time_%ssims_beta%.3f_%s_%s.zip' %(numsims, b, zstring, zstring2)
#############################################
# age data processing
graph_ages = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/urban_network_added_with_info_May24_2014/urban_ages_N10k_Sept2012.txt') # node number and age class
for line in graph_ages:
    new_line = line.strip().split(' ')
    node, age = new_line
    d_node_age[node] = age # node-ageclass dictionary
# define network size
N = len(d_node_age)
print "network size:", N
# create binary lists to indicate children and adults
# nodes are numbered 1..N and keyed as strings, hence the str(node) lookups
ch = [1 if d_node_age[str(node)] == '3' else 0 for node in xrange(1, int(N) + 1)]
ad = [1 if d_node_age[str(node)] == '4' else 0 for node in xrange(1, int(N) + 1)]
# child and adult population sizes
# float() so later attack-rate divisions avoid Python 2 integer division
chsz = float(sum(ch))
adsz = float(sum(ad))
# high risk groups: toddlers (0-2), seniors & elderly (65+)
# NOTE(review): the header maps ageclass '1' to Infant and '2' to Toddler,
# but `to` selects class '1' -- confirm which class the 0-2 group should use
to = [1 if d_node_age[str(node)] == '1' else 0 for node in xrange(1, int(N) + 1)]
sr = [1 if d_node_age[str(node)] == '5' or d_node_age[str(node)] == '6' else 0 for node in xrange(1, int(N) + 1)]
tosz = float(sum(to))
srsz = float(sum(sr))
print 'children, adults, toddlers, seniors', chsz, adsz, tosz, srsz
##############################################
# data processing - convert tstep info into dictionaries
# storage dictionaries need to be initialized outside the loop
# dict_epiincid[(code, simnumber, 'T', 'C' or 'A')] = [T, C or A incid at tstep 0, T, C or A incid at tstep 1...], where incidence is simply number of new cases (raw)
# dict_epiAR[(code, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per population size
# dict_epiOR[(code, simnumber)] = [OR at tstep0, OR at tstep1...]
# dict_epiOR_filt[(code, simnum)] = [OR for each time step for epidemics only where OR is nan when we want to exclude the time point due to small infected numbers]
# dict_epiresults[(code, simnumber)] = (episize, c_episize, a_episize)
# d_totepiOR[code] = [attack rate OR at sim1, OR at sim 2...]
# d_retroOR[code] = [mean retro OR at sim1, mean retro OR at sim2...]
d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt, d_totepiOR, d_retroOR = defaultdict(list), defaultdict(list), {}, defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)
for imm_val in imm_val_ls:
    zstring3 = 'single%s' %(imm_val) # string for filename disambiguation
    processing = clock()
    Itstep_file = 'Results/Itstep_immunity_time_%ssims_beta%.3f_%s_%s.txt' %(numsims, b, zstring, zstring3)
    Rtstep_file = 'Results/Rtstep_immunity_time_%ssims_beta%.3f_%s_%s.txt' %(numsims, b, zstring, zstring3)
    # recreate epidata from zip archive; results accumulate into the shared
    # storage dictionaries across all imm_val iterations
    d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = perc.recreate_epidata2(Itstep_file, Rtstep_file, zipname, imm_val, size_epi, ch, ad, to, sr, d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt)
    # calculate OR over entire simulation
    d_totepiOR[imm_val] = perc.OR_sim(numsims, d_epiresults, imm_val, chsz, adsz)
    # calculate mean retro OR
    d_retroOR[imm_val] = perc.mean_retro_OR(d_epiincid, d_epiOR, imm_val, beg_perc, end_perc)
    print imm_val, "processed", clock() - processing
    # number of simulations that reached epidemic size
    # NOTE(review): d_epiresults accumulates over every imm_val processed so
    # far, so this count is cumulative rather than per immunity value --
    # confirm whether a per-imm_val filter on key[0] was intended
    num_epi = sum([1 for key in d_epiresults if d_epiresults[key][0] > size_epi])
    print imm_val, "number of epidemics", num_epi
# grab unique list of immunity values that produced at least one epidemic
imm_epi = list(set([key[0] for key in d_epiincid]))
print d_retroOR
#############################################
## draw plots
##############################################
### plot total simulation AR with SD bars for children, adults, toddlers and the elderly vs pre-existing immunity
# per-immunity-value mean/SD attack rates for each age group
c_mns, c_sds, a_mns, a_sds = [],[],[],[]
d_mns, d_sds, s_mns, s_sds = [],[],[],[]
for v in sorted(imm_epi):
    # attack rate by age group
    # NOTE(review): the 'C'/'A'/'D'/'S' key codes come out of
    # perc.recreate_epidata2 -- presumably child/adult/toddler/senior;
    # confirm against that function
    C_episz_allsims = [sum(d_epiincid[key])/chsz for key in d_epiincid if key[0] == v and key[2] == 'C']
    A_episz_allsims = [sum(d_epiincid[key])/adsz for key in d_epiincid if key[0] == v and key[2] == 'A']
    D_episz_allsims = [sum(d_epiincid[key])/tosz for key in d_epiincid if key[0] == v and key[2] == 'D']
    S_episz_allsims = [sum(d_epiincid[key])/srsz for key in d_epiincid if key[0] == v and key[2] == 'S']
    # add mean and SD attack rates to list for each Tmult value
    c_mns.append(np.mean(C_episz_allsims))
    a_mns.append(np.mean(A_episz_allsims))
    d_mns.append(np.mean(D_episz_allsims))
    s_mns.append(np.mean(S_episz_allsims))
    c_sds.append(np.std(C_episz_allsims))
    a_sds.append(np.std(A_episz_allsims))
    d_sds.append(np.std(D_episz_allsims))
    s_sds.append(np.std(S_episz_allsims))
# plot AR by age group with errorbars
CH = plt.errorbar(sorted(imm_epi), c_mns, yerr = c_sds, marker = 'o', color = 'red', linestyle = 'None')
AD = plt.errorbar(sorted(imm_epi), a_mns, yerr = a_sds, marker = 'o', color = 'blue', linestyle = 'None')
TO = plt.errorbar(sorted(imm_epi), d_mns, yerr = d_sds, marker = 'o', color = 'orange', linestyle = 'None')
SR = plt.errorbar(sorted(imm_epi), s_mns, yerr = s_sds, marker = 'o', color = 'green', linestyle = 'None')
plt.xlabel('adult pre-existing immunity (epidemics only)')
plt.ylabel('Attack Rate')
lines = [CH, AD, TO, SR]
plt.legend(lines, ['children (5-18)', 'adults (19-64)', 'toddlers (0-2)', 'seniors (65+)'], loc = 'upper left')
plt.xlim([0, 1])
plt.ylim([0, 1])
figname = 'Figures/HR-AR_immunity_time_%ssims_beta%.3f_%s_%s.png' %(numsims, b, zstring, zstring2)
plt.savefig(figname)
plt.clf()
# archive the figure alongside the data it was built from
pp.compress_to_ziparchive(zipname, figname)
# plt.show()
#############################################
## plot total simulation OR with std bars vs pre-existing immunity
plt.errorbar(sorted(imm_epi), [np.mean(d_totepiOR[val]) for val in sorted(imm_epi)], yerr = [np.std(d_totepiOR[val]) for val in sorted(imm_epi)], marker = 'o', color = 'black', linestyle = 'None')
plt.xlabel('adult pre-existing immunity (epidemics only)')
plt.ylabel(par.pf_OR_lab)
plt.xlim([0, 1])
figname = 'Figures/totepiOR_immunity_time_%ssims_beta%.3f_%s_%s.png' %(numsims, b, zstring, zstring2)
plt.savefig(figname)
plt.clf()
pp.compress_to_ziparchive(zipname, figname)
# plt.show()
#############################################
## plot mean retro OR with std bars vs pre-existing immunity
plt.errorbar(sorted(imm_epi), [np.mean(d_retroOR[val]) for val in sorted(imm_epi)], yerr = [np.std(d_retroOR[val]) for val in sorted(imm_epi)], marker = 'o', color = 'black', linestyle = 'None')
plt.xlabel('adult pre-existing immunity (epidemics only)')
plt.ylabel(par.pf_mnretro_lab)
plt.xlim([0, 1])
figname = 'Figures/mnretro-epiOR_immunity_time_%ssims_beta%.3f_%s_%s.png' %(numsims, b, zstring, zstring2)
plt.savefig(figname)
plt.clf()
pp.compress_to_ziparchive(zipname, figname)
# plt.show()
#############################################
for imm in imm_epi:
zstring3 = 'single%s' %(imm) # string for filename disambiguation
##############################################
### plot filtered and aligned OR by time###
# alignment at tstep where sim reaches 5% of total episize
# starting tstep on plot is mode of tsteps where sim reaches 5% of total episize
# each sim is one line, each susc is a diff color on one plot
ORonly = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
# d_dummyalign_tstep[s] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, imm, align_prop)
# realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# plot aligned data
# zip beta, episim number, and tstep for 5% cum-inf for sims where (s, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[imm_val]):
plt.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
plt.plot(xrange(250), [1] * len(xrange(250)), marker = 'None', color = 'red', linewidth = 2)
plt.xlabel('epidemic time step, 5-95% cum infections')
plt.ylabel(par.pf_OR_lab)
figname = 'Figures/epiORalign_immunity_time_%ssims_beta%.3f_%s_%s.png' %(numsims, b, zstring, zstring3)
plt.savefig(figname)
plt.clf()
pp.compress_to_ziparchive(zipname, figname)
print "ORonly plotting time", imm, clock() - ORonly
# plt.show()
##############################################
### plot filtered and aligned OR by time ###
### secondary axis with child and adult incidence ###
# alignment at tstep where sim reaches 5% of total episize
# starting tstep on plot is mode of tsteps where sim reaches 5% of total episize
# each sim is one line, each beta is a diff color on one plot
ORincid = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
# d_dummyalign_tstep[suscept_val] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, imm, align_prop)
# realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# PROCESS YAX_AR:
# call upon d_epiAR dictionary
# dict_epiAR[(r, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per 100 individuals
# plot data
# create two y-axes
fig, yax_OR = plt.subplots()
yax_AR = yax_OR.twinx()
# zip s, episim number, and tstep for 5% cum-inf for sims where (s, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), | |
# <gh_stars>1000+
#! TODO: add module docstring
# modelsimp.py - tools for model simplification
#
# Author: <NAME>, <NAME>, <NAME>
# Date: 30 Nov 2010
#
# This file contains routines for obtaining reduced order models
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
# External packages and modules
import numpy as np
import warnings
from .exception import ControlSlycot, ControlMIMONotImplemented, \
ControlDimension
from .lti import isdtime, isctime
from .statesp import StateSpace
from .statefbk import gram
__all__ = ['hsvd', 'balred', 'modred', 'era', 'markov', 'minreal']
# Hankel Singular Value Decomposition
#
# The following returns the Hankel singular values, which are singular values
# of the matrix formed by multiplying the controllability and observability
# Gramians
def hsvd(sys):
    """Calculate the Hankel singular values.

    Parameters
    ----------
    sys : StateSpace
        A state space system

    Returns
    -------
    H : array
        Hankel singular values, sorted from largest to smallest

    See Also
    --------
    gram

    Notes
    -----
    The Hankel singular values are the singular values of the Hankel
    operator.  They are computed here as the square roots of the
    eigenvalues of the product of the observability and controllability
    gramians.  There are other (more efficient) methods based on solving
    the Lyapunov equation in a particular way (more details soon).

    Examples
    --------
    >>> H = hsvd(sys)
    """
    # TODO: implement for discrete time systems
    if isdtime(sys, strict=True):
        raise NotImplementedError("Function not implemented in discrete time")

    ctrb_gram = gram(sys, 'c')
    obsv_gram = gram(sys, 'o')

    # Eigenvalues of Wo @ Wc are the squared Hankel singular values.
    squared = np.linalg.eigvals(obsv_gram @ ctrb_gram)
    hsv = np.array(np.sqrt(squared))

    # Return the Hankel singular values, high to low.
    return np.sort(hsv)[::-1]
def modred(sys, ELIM, method='matchdc'):
    """
    Model reduction of `sys` by eliminating the states in `ELIM` using a given
    method.

    Parameters
    ----------
    sys: StateSpace
        Original system to reduce
    ELIM: array
        Vector of states to eliminate
    method: string
        Method of removing states in `ELIM`: either ``'truncate'`` or
        ``'matchdc'``.

    Returns
    -------
    rsys: StateSpace
        A reduced order model

    Raises
    ------
    ValueError
        Raised under the following conditions:

        * if `method` is not either ``'matchdc'`` or ``'truncate'``

        * if eigenvalues of `sys.A` are not all in left half plane
          (`sys` must be stable)

    Examples
    --------
    >>> rsys = modred(sys, ELIM, method='truncate')
    """
    # TODO: Check for continuous or discrete, only continuous supported now
    if not isctime(sys):
        raise NotImplementedError("Function not implemented in discrete time")

    # The reduction formulas assume a stable system.
    if np.any(np.linalg.eigvals(sys.A).real >= 0.0):
        raise ValueError("Oops, the system is unstable!")

    ELIM = np.sort(ELIM)
    # Indices of the states that are kept.
    NELIM = [i for i in range(len(sys.A)) if i not in ELIM]

    # Partition the state matrices into kept (1) and eliminated (2) parts.
    # np.ix_ fancy indexing replaces the original column-by-column
    # np.hstack loops: same result in one operation each.
    A11 = sys.A[np.ix_(NELIM, NELIM)]
    A12 = sys.A[np.ix_(NELIM, ELIM)]
    A21 = sys.A[np.ix_(ELIM, NELIM)]
    A22 = sys.A[np.ix_(ELIM, ELIM)]
    B1 = sys.B[NELIM, :]
    B2 = sys.B[ELIM, :]
    C1 = sys.C[:, NELIM]
    C2 = sys.C[:, ELIM]

    if method == 'matchdc':
        # Residualize: hold the eliminated states at their steady state.
        # Check if the matrix A22 is invertible
        if np.linalg.matrix_rank(A22) != len(ELIM):
            raise ValueError("Matrix A22 is singular to working precision.")
        # Solve A22 \ [A21 B2] in one pass: a single LU factorization
        # (cubic) of A22 followed by two quadratic back substitutions.
        A22I_A21_B2 = np.linalg.solve(A22, np.concatenate((A21, B2), axis=1))
        A22I_A21 = A22I_A21_B2[:, :A21.shape[1]]
        A22I_B2 = A22I_A21_B2[:, A21.shape[1]:]

        Ar = A11 - A12 @ A22I_A21
        Br = B1 - A12 @ A22I_B2
        Cr = C1 - C2 @ A22I_A21
        Dr = sys.D - C2 @ A22I_B2
    elif method == 'truncate':
        # Truncate: simply discard the eliminated states.
        Ar = A11
        Br = B1
        Cr = C1
        Dr = sys.D
    else:
        raise ValueError("Oops, method is not supported!")

    return StateSpace(Ar, Br, Cr, Dr)
def balred(sys, orders, method='truncate', alpha=None):
"""Balanced reduced order model of sys of a given order.
States are eliminated based on Hankel singular value.
If sys has unstable modes, they are removed, the
balanced realization is done on the stable part, then
reinserted in accordance with the reference below.
Reference: Hsu,C.S., and Hou,D., 1991,
Reducing unstable linear control systems via real Schur transformation.
Electronics Letters, 27, 984-986.
Parameters
----------
sys: StateSpace
Original system to reduce
orders: integer or array of integer
Desired order of reduced order model (if a vector, returns a vector
of systems)
method: string
Method of removing states, either ``'truncate'`` or ``'matchdc'``.
alpha: float
Redefines the stability boundary for eigenvalues of the system
matrix A. By default for continuous-time systems, alpha <= 0
defines the stability boundary for the real part of A's eigenvalues
and for discrete-time systems, 0 <= alpha <= 1 defines the stability
boundary for the modulus of A's eigenvalues. See SLICOT routines
AB09MD and AB09ND for more information.
Returns
-------
rsys: StateSpace
A reduced order model or a list of reduced order models if orders is
a list.
Raises
------
ValueError
If `method` is not ``'truncate'`` or ``'matchdc'``
ImportError
if slycot routine ab09ad, ab09md, or ab09nd is not found
ValueError
if there are more unstable modes than any value in orders
Examples
--------
>>> rsys = balred(sys, orders, method='truncate')
"""
if method != 'truncate' and method != 'matchdc':
raise ValueError("supported methods are 'truncate' or 'matchdc'")
elif method == 'truncate':
try:
from slycot import ab09md, ab09ad
except ImportError:
raise ControlSlycot(
"can't find slycot subroutine ab09md or ab09ad")
elif method == 'matchdc':
try:
from slycot import ab09nd
except ImportError:
raise ControlSlycot("can't find slycot subroutine ab09nd")
# Check for ss system object, need a utility for this?
# TODO: Check for continous or discrete, only continuous supported for now
# if isCont():
# dico = 'C'
# elif isDisc():
# dico = 'D'
# else:
dico = 'C'
job = 'B' # balanced (B) or not (N)
equil = 'N' # scale (S) or not (N)
if alpha is None:
if dico == 'C':
alpha = 0.
elif dico == 'D':
alpha = 1.
rsys = [] # empty list for reduced systems
# check if orders is a list or a scalar
try:
order = iter(orders)
except TypeError: # if orders is | |
#!/usr/bin/env python
import os
import astropy.io.fits as pyfits
ns_dmp=globals()
def parse_column_file(input,output=None,offsets=None):
f = open(input,'r').readlines()
dict = {}
for l in f:
import re
res = re .split('\s+',l)
print res
if len(res) > 3:
t = {}
t['cols'] = res[1]
t['offset'] = float(res[4])
dict[res[0]] = t
else:
dict[res[0]] = {'cols':res[1]}
if offsets:
for key in dict:
if key in offsets:
dict[key]['offset'] += offsets[key]
if not output: output = input + '.new'
o = open(input,'w')
for key in dict:
if 'offset' in dict[key]:
o.write(key + '\t' + dict[key]['cols'] + '\tAB\t0.02\t' + str(dict[key]['offset']) + '\n')
else:
o.write(key + '\t' + dict[key]['cols'] + '\n')
o.close()
def fit_zps(dictionary):
    """Run bpz in ONLY_TYPE mode and fold the fitted zeropoint offsets back
    into the .columns file.

    Shells out to bpz.py, scrapes the "Average" offset table from its
    stdout, and calls parse_column_file (which rewrites the .columns file
    in place) so a subsequent call starts from the updated zeropoints.

    dictionary -- mapping interpolated into the bpz command line; must
        provide BPZPATH, SUBARUDIR, CLUSTER, PHOTOMETRYDIR, magtype,
        SPECTRA, iaper, n, columns, magvar and catalog.  INTERP is forced
        to 0 here before interpolation.
    """
    dictionary['INTERP'] = 0
    command = 'python %(BPZPATH)s/bpz.py %(SUBARUDIR)s/%(CLUSTER)s/%(PHOTOMETRYDIR)s/all_bpz%(magtype)s%(SPECTRA)s%(iaper)s_%(n)s.cat \
        -COLUMNS %(columns)s \
        -MAG %(magvar)s \
        -SPECTRA %(SPECTRA)s \
        -PRIOR hdfn_SB \
        -CHECK yes \
        -PLOTS yes \
        -VERBOSE yes \
        -ZMAX 4.0 \
        -PLOTS yes \
        -INTERP %(INTERP)s \
        -INTERACTIVE yes \
        -ONLY_TYPE yes \
        -OUTPUT %(catalog)s' % dictionary
    print ' command=',command
    import commands
    # single fitting iteration; bump range() to iterate the zp fit further
    for i in range(1):
        import os
        os.system('cat ' + dictionary['columns'])
        print 'running'
        f = commands.getoutput(command).split('\n')
        print ' f=',f
        go = False
        index = 0
        import string
        offsets = {}
        # scan stdout for the "Average" block: filter names are on the next
        # line and the offsets four lines down
        for i in range(len(f)):
            print f[i]
            if string.find(f[i],'Average') != -1:
                import re
                filts = re.split('\s+',f[i+1])[1:]
                deltas = [float(x) for x in re.split('\s+',f[i+4])[1:-1]]
                offsets = dict(zip(filts,deltas))
                break
        print ' offsets=',offsets
        print dictionary['columns']
        # rewrites the .columns file in place with the new offsets
        parse_column_file(dictionary['columns'],offsets=offsets)
    #raw_input('finished fit_zps')
def convert_to_mags(run_name,mag_cat,outputfile):
## see adam_plot_bpz_output.py for helpful plots of this stuff
import string,os,sys
print "mag_cat=",mag_cat
mag = pyfits.open(mag_cat)[1]
cat = run_name + '.bpz'
purepath=sys.path
addpath=[os.environ['BPZPATH']]+purepath
sys.path=addpath
from useful import *
from coeio import loaddata, loadfile, params_cl, str2num, loaddict, findmatch1, pause #, prange, plotconfig
sys.path=purepath
bpzstr = loadfile(cat)
bpzparams = {}
i = 0
while bpzstr[i][:2] == '##':
line = bpzstr[i][2:]
if '=' in line:
[key, value] = string.split(line, '=')
bpzparams[key] = value
i = i + 1
columns = bpzparams.get('COLUMNS', run_name+'.columns')
flux_comparison = bpzparams.get('FLUX_COMPARISON', run_name+'.flux_comparison')
zs=get_2Darray(cat) #Read the whole file
all=get_2Darray(flux_comparison) #Read the whole file
ncols=len(all[0,:])
''' need to get the number of filters '''
nf=(ncols-5)/3
filters=get_str(columns,0,nf)
#print ' bpzparams["FLUX_COMPARISON"]=',bpzparams["FLUX_COMPARISON"]
print ' zs=',zs
print ' filters=',filters
print ' len(all[:,0])=',len(all[:,0])
print ' len(all[0,:])=',len(all[0,:])
''' need to retrieve the flux predicted, flux observed, and flux_error '''
import numpy,scipy
ID=scipy.array(all[:,0]) # FLUX (from spectrum for that TYPE)
ft=scipy.array(all[:,5:5+nf]) # FLUX (from spectrum for that TYPE)
fo=scipy.array(all[:,5+nf:5+2*nf]) # FLUX (OBSERVED)
efo=scipy.array(all[:,5+2*nf:5+3*nf]) # FLUX_ERROR (OBSERVED)
print ' len(ft)=',len(ft)
print ' -2.5*scipy.log10(ft)=',-2.5*scipy.log10(ft)
i = 0
cols = []
''' if column not already there, then add it '''
cols.append(pyfits.Column(name='SeqNr', format = 'J', array = ID))
cols.append(pyfits.Column(name='NFILT', format = 'J', array = mag.data.field('NFILT')))
ft_non0_spots=ft>0
#adam-plots# in order to mkek the comparison plots (place notes below into func right here, or do ns_dmp.update(locals()) and paste into terminal)
if 1: #adam-plots# here is how I made the comparison plots (put into func)
from matplotlib.pylab import *
import imagetools
mag_info={}
for i in range(len(filters)):
#print filters[i], i, ft[:,i]
for column in mag.columns:
#if 'MAG_APER1-' + filters[i] == column.name or 'MAG_APER-' + filters[i] == column.name:
if 'MAG_APER1-' + filters[i] == column.name:
if 'MAG_APER1-' + filters[i] == column.name: measured = mag.data.field('MAG_APER1-'+filters[i]).copy()
#if 'MAG_APER-' + filters[i] == column.name: measured = mag.data.field('MAG_APER-'+filters[i])[:,1].copy()
measured_bad=(measured==-99)#+(measured==99)
measured_good=logical_not(measured_bad)
print column.name," measured_bad.sum(), measured_good.sum()=", measured_bad.sum(), measured_good.sum()
if measured_good.sum() > 0:
''' subsitute where there are -99 values '''
if not measured.shape==ft[:,i].shape: raise Exception('not measured.shape==ft[:,i].shape')
measured_b4=measured.copy()
replace_spots=ft_non0_spots[:,i]*measured_bad
if not replace_spots.any():
print column.name, " no suitable replacements found"
break
ft_bads=-2.5*scipy.log10(ft[:,i][replace_spots])
measured_goods=measured[measured_good]
measured_final=measured.copy()
measured_final[replace_spots] = -2.5*scipy.log10(ft[:,i][replace_spots])
#only -99 right now #measured_final[measured_final==99] = -99
print column.name, "min/mean/max of measured_goods: ",measured_goods.min(),measured_goods.mean(),measured_goods.max()
mag_info[column.name]={}
mag_info[column.name]["measured_b4"]=measured_b4
mag_info[column.name]["measured_final"]=measured_final
mag_info[column.name]["measured_goods"]=measured_goods
mag_info[column.name]["ft_bads"]=ft_bads
keys1=mag_info.keys()
keys2=['measured_final', 'measured_goods', 'measured_b4','ft_bads']
for k1 in keys1:
f=figure();f,axes=imagetools.AxesList(f,(2,2))
f.suptitle(k1)
for ax,k2 in zip(axes,keys2):
ax.set_title(k2)
ax.hist(mag_info[k1][k2],bins=100)
f.savefig("/u/ki/awright/wtgpipeline/plots/plt_do_multiple_photoz-"+k1)
for i in range(len(filters)):
print '\nfilters[i]=',filters[i] , ' i=',i , ' ft[:,i]=',ft[:,i]
added = False
for column in mag.columns:
#adam-old# #if 'MAG_APER-' + filters[i] == column.name:
if 'MAG_APER1-' + filters[i] == column.name:
measured = mag.data.field('MAG_APER1-'+filters[i]).copy()
#adam-old# measured = mag.data.field('MAG_APER-'+filters[i])[:,1]
#adam-old# measured_bad=measured==-99
#adam-old# measured_good=measured!=-99
measured_bad=(measured==-99)#+(measured==99)
measured_good=logical_not(measured_bad)
print column.name," measured_bad.sum(), measured_good.sum()=", measured_bad.sum(), measured_good.sum()
if measured_good.any(): #if any good dets
''' subsitute where there are -99 values '''
if not measured.shape==ft[:,i].shape: raise Exception('not measured.shape==ft[:,i].shape')
print column.name, "measured.shape=",measured.shape
#adam: we want values that are measured==-99 and ft's corresponding spots are ft!=0
replace_spots=ft_non0_spots[:,i]*measured_bad
if not replace_spots.any():
print column.name, " no suitable replacements found"
break
measured_final=measured.copy()
measured_final[replace_spots] = -2.5*scipy.log10(ft[:,i][replace_spots])
ft_bads=-2.5*scipy.log10(ft[:,i][replace_spots])
#only -99 right now# measured_final[measured_final==99] = -99
print column.name, "min/mean/max of measured_final: ",measured_final.min(),measured_final.mean(),measured_final.max()
print column.name, "min/mean/max of ft_bads: ",ft_bads.min(),ft_bads.mean(),ft_bads.max()
cols.append(pyfits.Column(name='HYBRID_MAG_APER1-' + filters[i], format = '1E', array = measured_final))
added = True
print column.name, 'measured', filters[i]
break
if not added: #if no good dets, then all HYBRID_MAG is bpz_MAG (this makes perfect sense, but hopefully we never run into this!
print 'adam-look-Error: hit "if not added" portion of "convert_to_mags" function in "adam_do_multiple_photoz.py"\nadam-look-Error: sextractor measured MAG_APER1-'+filters[i]+' has NO good detections, so HYBRID_MAG_APER1-'+filters[i]+' will be ENTIRELY based on bpz output magnitudes!'
cols.append(pyfits.Column(name='HYBRID_MAG_APER1-'+filters[i], format = '1E', array = -2.5*scipy.log10(ft[:,i])))
cols_dont_double=[]
for column_name in mag.columns.names:
if string.find(column_name,'MAG') == -1 and string.find(column_name,'FLUX') != -1:#if it has "FLUX" and doesn't have "MAG" in it
col_to='DATA_' + column_name.replace('FLUX','MAG')
cols_dont_double.append(col_to)
for ii,(column_name,column_format) in enumerate(zip(mag.columns.names,mag.columns.formats)):
if string.find(column_name,'MAG') == -1 and string.find(column_name,'FLUX') != -1:#if it has "FLUX" and doesn't have "MAG" in it
col_to='DATA_' + column_name.replace('FLUX','MAG')
a = -2.5*scipy.log10(mag.data.field(column_name))
a[mag.data.field(column_name) == 0] = -99
cols.append(pyfits.Column(name='DATA_' + column_name.replace('FLUX','MAG'), format = column_format, array = a))
else:
col_to='DATA_' + column_name
if col_to in cols_dont_double:
continue
a = mag.data.field(column_name)
cols.append(pyfits.Column(name='DATA_' + column_name, format = column_format, array = a))
print ' len(cols)=',len(cols)
#adam-fixed# There are duplicate columns apparently!
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='OBJECTS'
print ' outputfile=',outputfile
hdulist.writeto(outputfile,overwrite=True)
#ns_dmp.update(locals()) #adam-tmp#
def add_dummy_ifilter(catalog, outputfile):
import numpy
i = 0
cols = []
tables = pyfits.open(catalog)['OBJECTS']
for col in ['SeqNr']:
cols.append(pyfits.Column(name=col, format = 'J', array = tables.data.field(col)))
already_there = False
for column in tables.columns:
cols.append(column)
if column.name == 'FLUX_APER1-SUBARU-10_2-1-W-S-I+':
already_there = True
''' if column not already there, then add it STILL NEED TO IMPLEMENT !!! '''
rows = len(pyfits.open(catalog)['OBJECTS'].data)
if not already_there:
cols.append(pyfits.Column(name='FLUX_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='FLUXERR_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='FLUX_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='FLUXERR_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAG_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAGERR_APER0-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAG_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
cols.append(pyfits.Column(name='MAGERR_APER1-SUBARU-10_2-1-W-S-I+', format = '1E', array = numpy.zeros(rows)))
#adam-SHNT# Ok, so this just puts the cols in there as zeros and leaves it up to "convert_to_mags" to calculate the "HYBRID" versions, how does that work??
#print ' cols=',cols
print ' len(cols)=',len(cols)
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='OBJECTS'
import os
os.system('rm ' + outputfile)
print ' outputfile=',outputfile
hdulist.writeto(outputfile)
def add_dummy_filters(catalog, outputfile):
add_filters =['MEGAPRIME-0-1-g','MEGAPRIME-0-1-r','MEGAPRIME-0-1-i','MEGAPRIME-0-1-z','SUBARU-10_2-1-W-S-G+','SUBARU-10_2-1-W-C-RC','SUBARU-10_2-1-W-C-IC']
use_filters = ['MEGAPRIME-0-1-u','SUBARU-10_2-1-W-J-B','SUBARU-10_2-1-W-J-V','SUBARU-10_2-1-W-S-R+','SUBARU-10_2-1-W-S-I+','SUBARU-10_2-1-W-S-Z+']
import numpy
i = 0
cols = []
tables = pyfits.open(catalog)['OBJECTS']
for col in ['SeqNr','B_mask','V_mask','i_mask','z_mask']:
cols.append(pyfits.Column(name=col, format = 'J', array = tables.data.field(col)))
for filt in use_filters: # tables[str(i)]['OBJECTS'].columns:
cols.append(pyfits.Column(name='MAG_APER-'+filt, format = '1E', array = tables.data.field('MAG_APER-'+filt)))
cols.append(pyfits.Column(name='MAGERR_APER-'+filt, format = '1E', array = tables.data.field('MAGERR_APER-'+filt)))
''' if column not already there, then add it STILL NEED TO IMPLEMENT !!! '''
rows = len(pyfits.open(catalog)['OBJECTS'].data)
for filt in add_filters:
cols.append(pyfits.Column(name='MAG_APER-'+filt, format = '1E', array = -99.*numpy.ones(rows)))
cols.append(pyfits.Column(name='MAGERR_APER-'+filt, format = '1E', array = 99.*numpy.ones(rows)))
print ' len(cols)=',len(cols)
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header['EXTNAME']='OBJECTS'
import os
os.system('rm ' + outputfile)
print ' outputfile=',outputfile
hdulist.writeto(outputfile)
def mkplot(file,name):
import MySQLdb
import os, sys, anydbm, time
import lib, scipy, pylab
from scipy import arange
file = open(file,'r').readlines()
results = []
for line in file:
if line[0] != '#':
import re
res = re.split('\s+',line)
#for i in range(len(res)):
# print res[i],i
results.append([float(res[2]),float(res[23]),res[1]])
diff = []
z = []
z_spec = []
zbs = {'0,0.2':[],'0.2,0.4':[],'0.4,0.6':[],'0.6,0.8':[]}
for line in results:
diff_val = (line[0] - line[1])/(1 + line[1])
diff.append(diff_val)
z.append(line[0])
z_spec.append(line[1])
for zb in zbs.keys():
import re
min,max = re.split('\,',zb)
if float(min) <= float(line[1]) < float(max):
zbs[zb].append(diff_val)
for zb in zbs.keys():
import scipy
print ' zb=',zb , ' scipy.median(scipy.array(zbs[zb]))=',scipy.median(scipy.array(zbs[zb]))
ys = []
for y in zbs[zb]:
if abs(y) < 0.1:
ys.append(y)
print ' scipy.mean(scipy.array(ys))=',scipy.mean(scipy.array(ys))
list = diff[:]
import pylab
| |
import glob
from shutil import copy2
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import pprint
import pandas as pd
import plotly.express as px
from plotly.subplots import make_subplots
prompt = lambda q : input("{} (y/n): ".format(q)).lower().strip()[:1] == "y"
def parse_log(filename, params, eval_k, cl_to_plot_id, target_measure, print_params, start_line=None, end_line=None):
    """Parse one training log file and extract per-epoch metrics.

    The log is expected to start with a hyper-parameter header block
    (terminated by the first empty line), followed by per-epoch
    'TRAIN/VALID/TEST epoch N' sections containing 'mean <metric>',
    'measures microavg', 'measures@K ...' and 'measures for class ...'
    lines (an optional 'INFO:root:' prefix is stripped).

    Parameters:
        filename       -- path of the log file.
        params         -- hyper-parameter names to scrape from the header.
        eval_k         -- K used for @K metrics (e.g. precision@1000).
        cl_to_plot_id  -- target class id for per-class metrics.
        target_measure -- metric name used to select the best epoch.
        print_params   -- if True, pretty-print the parsed hyper-parameters.
        start_line / end_line -- optional 1-based line window, used when one
            file contains several consecutive training runs.

    Returns:
        (res_map, exp_params, metrics, str_legend, str_results,
         target_metric_best, finished, line_nr, epoch)
    """
    res_map = {}
    prec = {}
    rec = {}
    f1 = {}
    prec_at_k = {}
    rec_at_k = {}
    f1_at_k = {}
    prec_cl = {}
    rec_cl = {}
    f1_cl = {}
    prec_at_k_cl = {}
    rec_at_k_cl = {}
    f1_at_k_cl = {}
    best_measure = {}
    best_epoch = {}
    target_metric_best = {}
    # Fallback values, reported when the chosen best epoch has incomplete
    # entries in the log (e.g. the run was killed mid-epoch).
    last_test_ep = {
        'precision': '-',
        'recall': '-',
        'F1': '-',
        'AVG-precision': '-',
        'AVG-recall': '-',
        'AVG-F1': '-',
        'precision@' + str(eval_k): '-',
        'recall@' + str(eval_k): '-',
        'F1@' + str(eval_k): '-',
        'AVG-precision@' + str(eval_k): '-',
        'AVG-recall@' + str(eval_k): '-',
        'AVG-F1@' + str(eval_k): '-',
        'mrr': '-',
        'map': '-',
        'auc': '-',
        'best_epoch': -1,
    }
    set_names = ['TRAIN', 'VALID', 'TEST']
    finished = False
    epoch = 0
    metrics_names = ["error",
                     "loss",
                     "mrr",
                     "map",
                     "auc",
                     "gmauc",
                     "lp_map",
                     "lp_auc",
                     "1000_auc",
                     "1000_map",
                     "100_auc",
                     "100_map",
                     "10_auc",
                     "10_map",
                     "1_auc",
                     "1_map",
                     ]
    metrics = {metric: {} for metric in metrics_names}
    for s in set_names:
        for metric in metrics:
            metrics[metric][s] = {}
        prec[s] = {}
        rec[s] = {}
        f1[s] = {}
        prec_at_k[s] = {}
        rec_at_k[s] = {}
        f1_at_k[s] = {}
        prec_cl[s] = {}
        rec_cl[s] = {}
        f1_cl[s] = {}
        prec_at_k_cl[s] = {}
        rec_at_k_cl[s] = {}
        f1_at_k_cl[s] = {}
        best_measure[s] = 0
        best_epoch[s] = -1
    str_comments = ''
    str_comments1 = ''
    exp_params = {}
    with open(filename) as f:
        params_line = True
        readlr = False
        line_nr = 0
        for line in f:
            line_nr += 1
            if start_line is not None and start_line > line_nr:
                continue
            if end_line is not None and end_line <= line_nr:
                break
            line = line.replace('INFO:root:', '').replace('\n', '')
            if params_line:  # still inside the parameter header block
                if "'learning_rate':" in line:
                    readlr = True
                if not readlr:
                    str_comments += line + '\n'
                else:
                    str_comments1 += line + '\n'
            if params_line:  # scrape the requested hyper-parameters
                for p in params:
                    str_p = '\'' + p + '\': '
                    if str_p in line:
                        exp_params[p] = line.split(str_p)[1].split(',')[0]
            if line == '':
                # The first empty line marks the end of the parameter header.
                params_line = False
            if 'TRAIN epoch' in line or 'VALID epoch' in line or 'TEST epoch' in line:
                set_name = line.split(' ')[1]
                previous_epoch = epoch
                epoch = int(line.split(' ')[3])
                if set_name == 'TEST':
                    last_test_ep['best_epoch'] = epoch
                if epoch >= 50000:
                    break
                if previous_epoch > epoch and epoch == 1:
                    epoch = previous_epoch  # used to distinguish between downstream and frozen decoder
                    break  # A new training has started, e.g. frozen encoder or downstream
            if 'Number of parameters' in line:
                res_map['num_gcn_params'] = int(line.split('GCN: ')[1].split(',')[0])
                res_map['num_cls_params'] = int(line.split('Classifier: ')[1].split(',')[0])
                res_map['num_total_params'] = int(line.split('Total: ')[1].split(',')[0])
                assert(res_map['num_gcn_params'] + res_map['num_cls_params'] == res_map['num_total_params'])
            if "mean" in line:
                for metric in metrics:
                    if "mean {} ".format(metric) in line:
                        v = float(line.split('mean {} '.format(metric))[1].split(' ')[0])
                        metrics[metric][set_name][epoch] = v
                        if target_measure == metric:
                            if target_measure == 'loss':
                                # Lower is better for loss.  BUGFIX: accept the
                                # first observed epoch unconditionally -- the
                                # initial best_measure of 0 could otherwise
                                # never be beaten by a real (positive) loss.
                                is_better = v < best_measure[set_name] or best_epoch[set_name] == -1
                            else:
                                is_better = v > best_measure[set_name]
                            if is_better:
                                best_measure[set_name] = v
                                best_epoch[set_name] = epoch
                        if set_name == 'TEST':
                            last_test_ep[metric] = v
            if 'measures microavg' in line:
                prec[set_name][epoch] = float(line.split('precision ')[1].split(' ')[0])
                rec[set_name][epoch] = float(line.split('recall ')[1].split(' ')[0])
                f1[set_name][epoch] = float(line.split('f1 ')[1].split(' ')[0])
                if (target_measure == 'avg_p' or target_measure == 'avg_r' or target_measure == 'avg_f1'):
                    if target_measure == 'avg_p':
                        v = prec[set_name][epoch]
                    elif target_measure == 'avg_r':
                        v = rec[set_name][epoch]
                    else:  # F1
                        v = f1[set_name][epoch]
                    if v > best_measure[set_name]:
                        best_measure[set_name] = v
                        best_epoch[set_name] = epoch
                if set_name == 'TEST':
                    last_test_ep['AVG-precision'] = prec[set_name][epoch]
                    last_test_ep['AVG-recall'] = rec[set_name][epoch]
                    last_test_ep['AVG-F1'] = f1[set_name][epoch]
            elif 'measures@' + str(eval_k) + ' microavg' in line:
                prec_at_k[set_name][epoch] = float(line.split('precision ')[1].split(' ')[0])
                rec_at_k[set_name][epoch] = float(line.split('recall ')[1].split(' ')[0])
                f1_at_k[set_name][epoch] = float(line.split('f1 ')[1].split(' ')[0])
                if set_name == 'TEST':
                    last_test_ep['AVG-precision@' + str(eval_k)] = prec_at_k[set_name][epoch]
                    last_test_ep['AVG-recall@' + str(eval_k)] = rec_at_k[set_name][epoch]
                    last_test_ep['AVG-F1@' + str(eval_k)] = f1_at_k[set_name][epoch]
            elif 'measures for class ' in line:
                cl = int(line.split('class ')[1].split(' ')[0])
                if cl not in prec_cl[set_name]:
                    prec_cl[set_name][cl] = {}
                    rec_cl[set_name][cl] = {}
                    f1_cl[set_name][cl] = {}
                prec_cl[set_name][cl][epoch] = float(line.split('precision ')[1].split(' ')[0])
                rec_cl[set_name][cl][epoch] = float(line.split('recall ')[1].split(' ')[0])
                f1_cl[set_name][cl][epoch] = float(line.split('f1 ')[1].split(' ')[0])
                if (target_measure == 'p' or target_measure == 'r' or target_measure == 'f1') and cl == cl_to_plot_id:
                    if target_measure == 'p':
                        v = prec_cl[set_name][cl][epoch]
                    elif target_measure == 'r':
                        v = rec_cl[set_name][cl][epoch]
                    else:  # F1
                        v = f1_cl[set_name][cl][epoch]
                    if v > best_measure[set_name]:
                        best_measure[set_name] = v
                        best_epoch[set_name] = epoch
                if set_name == 'TEST':
                    last_test_ep['precision'] = prec_cl[set_name][cl][epoch]
                    last_test_ep['recall'] = rec_cl[set_name][cl][epoch]
                    last_test_ep['F1'] = f1_cl[set_name][cl][epoch]
            elif 'measures@' + str(eval_k) + ' for class ' in line:
                cl = int(line.split('class ')[1].split(' ')[0])
                if cl not in prec_at_k_cl[set_name]:
                    prec_at_k_cl[set_name][cl] = {}
                    rec_at_k_cl[set_name][cl] = {}
                    f1_at_k_cl[set_name][cl] = {}
                prec_at_k_cl[set_name][cl][epoch] = float(line.split('precision ')[1].split(' ')[0])
                rec_at_k_cl[set_name][cl][epoch] = float(line.split('recall ')[1].split(' ')[0])
                f1_at_k_cl[set_name][cl][epoch] = float(line.split('f1 ')[1].split(' ')[0])
                if (target_measure == 'p@k' or target_measure == 'r@k' or target_measure == 'f1@k') and cl == cl_to_plot_id:
                    if target_measure == 'p@k':
                        v = prec_at_k_cl[set_name][cl][epoch]
                    elif target_measure == 'r@k':
                        v = rec_at_k_cl[set_name][cl][epoch]
                    else:
                        v = f1_at_k_cl[set_name][cl][epoch]
                    if v > best_measure[set_name]:
                        best_measure[set_name] = v
                        best_epoch[set_name] = epoch
                if set_name == 'TEST':
                    last_test_ep['precision@' + str(eval_k)] = prec_at_k_cl[set_name][cl][epoch]
                    last_test_ep['recall@' + str(eval_k)] = rec_at_k_cl[set_name][cl][epoch]
                    last_test_ep['F1@' + str(eval_k)] = f1_at_k_cl[set_name][cl][epoch]
            if 'FINISHED' in line:
                finished = True
    if best_epoch['TEST'] < 0 and best_epoch['VALID'] < 0 or last_test_ep['best_epoch'] < 1:
        # Nothing learned, it is useless, abort
        print('best_epoch<0: -> skip')
        target_best = {}
        target_best['TEST'] = 0
        str_legend = 'useless'
        str_results = 0
        return res_map, exp_params, metrics, str_legend, str_results, target_best, finished, line_nr, epoch
    if start_line is None:
        # Will fail for frozen encoder and downstream runs, so only do this for the first parse
        res_map['model'] = exp_params['model'].replace("'", "")
    str_params = (pprint.pformat(exp_params))
    if print_params:
        print('str_params:\n', str_params)
    # Prefer the epoch selected on VALID; fall back to the best TEST epoch.
    if best_epoch['VALID'] >= 0:
        best_ep = best_epoch['VALID']
    else:
        best_ep = best_epoch['TEST']
    use_latest_ep = True
    try:
        res_map['precision'] = prec_cl['TEST'][cl_to_plot_id][best_ep]
        res_map['recall'] = rec_cl['TEST'][cl_to_plot_id][best_ep]
        res_map['F1'] = f1_cl['TEST'][cl_to_plot_id][best_ep]
        res_map['AVG-precision'] = prec['TEST'][best_ep]
        res_map['AVG-recall'] = rec['TEST'][best_ep]
        res_map['AVG-F1'] = f1['TEST'][best_ep]
    except KeyError:
        # Best epoch incomplete in the log: fall back to the last TEST epoch.
        res_map['precision'] = last_test_ep['precision']
        res_map['recall'] = last_test_ep['recall']
        res_map['F1'] = last_test_ep['F1']
        res_map['AVG-precision'] = last_test_ep['AVG-precision']
        # BUGFIX: this previously copied last_test_ep['AVG-F1'] into AVG-recall.
        res_map['AVG-recall'] = last_test_ep['AVG-recall']
        res_map['AVG-F1'] = last_test_ep['AVG-F1']
        use_latest_ep = False
    try:
        res_map['precision@' + str(eval_k)] = prec_at_k_cl['TEST'][cl_to_plot_id][best_ep]
        res_map['recall@' + str(eval_k)] = rec_at_k_cl['TEST'][cl_to_plot_id][best_ep]
        res_map['F1@' + str(eval_k)] = f1_at_k_cl['TEST'][cl_to_plot_id][best_ep]
        res_map['AVG-precision@' + str(eval_k)] = prec_at_k['TEST'][best_ep]
        res_map['AVG-recall@' + str(eval_k)] = rec_at_k['TEST'][best_ep]
        res_map['AVG-F1@' + str(eval_k)] = f1_at_k['TEST'][best_ep]
    except KeyError:
        res_map['precision@' + str(eval_k)] = last_test_ep['precision@' + str(eval_k)]
        res_map['recall@' + str(eval_k)] = last_test_ep['recall@' + str(eval_k)]
        res_map['F1@' + str(eval_k)] = last_test_ep['F1@' + str(eval_k)]
        res_map['AVG-precision@' + str(eval_k)] = last_test_ep['AVG-precision@' + str(eval_k)]
        res_map['AVG-recall@' + str(eval_k)] = last_test_ep['AVG-recall@' + str(eval_k)]
        res_map['AVG-F1@' + str(eval_k)] = last_test_ep['AVG-F1@' + str(eval_k)]
    for metric in metrics:
        if len(metrics[metric]['TEST']) <= 0:
            continue
        try:
            if metric == target_measure:
                target_metric_best['TRAIN'] = metrics[metric]['TRAIN'][best_ep]
                target_metric_best['VALID'] = metrics[metric]['VALID'][best_ep]
                target_metric_best['TEST'] = metrics[metric]['TEST'][best_ep]
            res_map[metric] = metrics[metric]['TEST'][best_ep]
        except KeyError:
            # Best epoch missing for this metric: use the last TEST value.
            res_map[metric] = last_test_ep[metric]
    if use_latest_ep:
        res_map['best_epoch'] = best_ep
    else:
        res_map['best_epoch'] = last_test_ep['best_epoch']
    # Assemble a CSV-like row (legend + values) from results and parameters.
    str_results = ''
    str_legend = ''
    for k, v in res_map.items():
        str_results += str(v) + ','
        str_legend += str(k) + ','
    for k, v in exp_params.items():
        str_results += str(v) + ','
        str_legend += str(k) + ','
    log_file = filename.split('/')[-1].split('.log')[0]
    res_map['log_file'] = log_file
    # NOTE(review): assumes the log file name contains 'grid_'; raises
    # IndexError otherwise (unchanged behavior).
    grid_cell = log_file.split('grid_')[1]
    res_map['grid_cell'] = grid_cell
    str_results += '{},{}'.format(log_file, grid_cell)
    str_legend += 'log_file,grid_cell'
    return res_map, exp_params, metrics, str_legend, str_results, target_metric_best, finished, line_nr, epoch
def parse_all_logs_in_folder(log_folder, return_continuous_encoder_logs=False):
cl_to_plot_id = 1 # Target class, typically the low frequent one
# We don't do edge classification here
#if 'reddit' in log_folder or ('bitcoin' in log_folder and 'edge' in log_folder):
# cl_to_plot_id = 0 # 0 for reddit dataset_name or bitcoin edge cls
simulate_early_stop = 0 # Early stop patience
eval_k = 1000 # to compute metrics @K (for instance precision@1000)
print_params = False # Print the parameters of each simulation
##### End parameters ######
#if 'elliptic' in log_folder or 'reddit' in log_folder or 'enron' in log_folder or ('bitcoin' in log_folder and 'edge' in log_folder):
# target_measure='f1' # map mrr auc f1 p r loss avg_p avg_r avg_f1
#else:
# target_measure='map' # map mrr auc f1 p r loss avg_p avg_r avg_f1
target_measure='map' # map mrr auc f1 p r loss avg_p avg_r avg_f1
# Hyper parameters to analyze
params = []
params.append('learning_rate')
params.append('num_hist_steps')
params.append('layer_1_feats')
params.append('lstm_l1_feats')
params.append('class_weights')
params.append('adj_mat_time_window')
params.append('cls_feats')
params.append('model')
params.append('val_sampling')
logs = {}
continuous_encoder_logs = {}
csv = []
csv_continuous_encoder = []
header = None
log_folderfiles = glob.glob(log_folder+'*')
printstr = ''
best_log_file = ''
best_log_file_continuous_encoder = ''
best_target_metric = 0
best_target_metric_continuous_encoder = 0
for log_file in log_folderfiles:
if log_file.endswith(".log") and not 'best_' in | |
<gh_stars>1-10
"""
Generator and Predictor classes for the byte-level byte-to-span model for NER.
"""
# pylint: disable=too-many-locals
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-statements
# pylint: disable=invalid-name
from math import floor
import tensorflow as tf
import numpy as np
import ner.data.dataset as data
from ner.features import Features, Predictions
from ner.registry import Registries
import ner.byte as byte
@Registries.train_generators.register("byte_to_span")
class Byte2SpanDSLevelTrainGenerator(data.Generator):
    """ Iterator for Byte2Span Model _training_ data.
    It iterates over the entire datasets worth of byte-sequences, creating
    independent segments at each point.
    """
    def __init__(self, *, dataset, hp):
        data.Generator.__init__(self, dataset, hp)
        self.window_size = hp.window_size
        self.stride = hp.stride
        self.spans = {}
        # Indexer mapping output symbols (GO/STOP/positions/types) to ids.
        self.output_idxr = byte.seq2seq_indexer(self.window_size)
        self.duplicate_data = hp.duplicate_data
        self.byte_drop_rate = hp.byte_dropout_rate
        # 256 is one past the largest real byte value, so it can never occur
        # in the input; apply_drop_to_seq asserts this invariant.
        self.byte_drop_idx = 256
        complete_byte_seq, spans = self.dataset.get_dataset_as_byte_sequence_and_spans()
        self.complete_byte_seq = complete_byte_seq
        self.spans = spans
    def iterate(self):
        """ Iterate over consecutive windows of size self.window_size, with stride
        self.stride. Iterates over the entire dataset at once, so consecutive
        sentences share windows sometimes.

        Yields (features, targets) dicts; when self.duplicate_data is set,
        each window is yielded twice: once with byte dropout applied and
        once without.
        """
        total_num_bytes = len(self.complete_byte_seq)
        num_steps = floor(total_num_bytes / self.stride)
        example_count = 0
        for i in range(num_steps):
            start = i * self.stride
            # Clamp the window end at the end of the dataset.
            end = total_num_bytes \
                if start + self.window_size > total_num_bytes \
                else start + self.window_size
            byte_of_bytes = self.complete_byte_seq[start:end]
            spans = self.get_spans_within(segment_start=start, segment_end=end)
            # Re-express each span relative to the segment start.
            segment_spans = []
            for span in spans:
                segment_spans.append(span.inside_segment_start(start))
            segment = byte.ByteSegment(_id=example_count,
                                       sent_id=0,
                                       wbytes=byte_of_bytes,
                                       spans=segment_spans,
                                       absolute_start=start)
            # NOTE(review): a rate of exactly 0.0 still takes the dropout
            # path (harmless, since r <= 0.0 is almost never true); only a
            # negative rate skips it entirely -- confirm this is intended.
            if self.byte_drop_rate >= 0.:
                seq = self.apply_drop_to_seq(segment.bytes)
                seq_no_drop = segment.bytes
            else:
                seq = segment.bytes
                seq_no_drop = None
            features = {
                Features.INPUT_SYMBOLS.value: seq,
                Features.INPUT_SEQUENCE_LENGTH.value: [len(seq)],
                Features.WINDOW_ID.value: [example_count],
                Features.REL_POS.value: [segment.absolute_start]
            }
            target, teacher = segment.build_gold_labels()
            # Convert symbol sequences to integer ids for the model.
            target_idxd = [self.output_idxr[t] for t in target]
            teacher_idxd = [self.output_idxr[t] for t in teacher]
            targets = {
                Features.TARGET_SEQUENCE.value: target_idxd,
                Features.TEACHER_TARGET_SEQUENCE.value: teacher_idxd,
                Features.TARGET_SEQUENCE_LENGTH.value: [len(target_idxd)]
            }
            example_count += 1
            yield features, targets
            if self.duplicate_data:
                # duplicate_data requires the dropout branch above to have
                # run, otherwise seq_no_drop was never captured.
                assert seq_no_drop is not None
                # NOTE(review): the duplicate reuses len(seq) (equal in
                # length to seq_no_drop, since dropout substitutes rather
                # than deletes) and the already-incremented example_count
                # as its window id -- confirm the id reuse is intended.
                features = {
                    Features.INPUT_SYMBOLS.value: seq_no_drop,
                    Features.INPUT_SEQUENCE_LENGTH.value: [len(seq)],
                    Features.WINDOW_ID.value: [example_count],
                    Features.REL_POS.value: [segment.absolute_start]
                }
                yield features, targets
    def estimator_params(self):
        """ Return parameters for tf.Estimator class for training. """
        return {
            'output-voc-size': len(self.output_idxr),
            'go-idx': self.output_idxr['GO'],
            'stop-idx': self.output_idxr['STOP'],
        }
    def datashape(self):
        # Shapes of the tensors produced by iterate(); None = variable length.
        features = {
            Features.INPUT_SYMBOLS.value: [None],
            Features.INPUT_SEQUENCE_LENGTH.value: [1],
            Features.WINDOW_ID.value: 1,
            Features.REL_POS.value: 1,
        }
        targets = {
            Features.TARGET_SEQUENCE.value: [None],
            Features.TEACHER_TARGET_SEQUENCE.value: [None],
            Features.TARGET_SEQUENCE_LENGTH.value: [1]
        }
        return (features, targets)
    def datatypes(self):
        # Dtypes of the tensors produced by iterate().
        features = {
            Features.INPUT_SYMBOLS.value: tf.int64,
            Features.INPUT_SEQUENCE_LENGTH.value: tf.int64,
            Features.WINDOW_ID.value: tf.int64,
            Features.REL_POS.value: tf.int64,
        }
        targets = {
            Features.TARGET_SEQUENCE.value: tf.int64,
            Features.TEACHER_TARGET_SEQUENCE.value: tf.int64,
            Features.TARGET_SEQUENCE_LENGTH.value: tf.int64
        }
        return (features, targets)
    def get_spans_within(self, *, segment_start, segment_end):
        """ Get all spans inside a certain window """
        # Linear scan over window positions; self.spans is keyed by byte
        # position (presumably span start offsets -- TODO confirm).
        spans = []
        for i in range(segment_start, segment_end):
            if i in self.spans:
                span = self.spans[i]
                if span.contained_in(seg_start=segment_start, seg_end=segment_end):
                    spans.append(span)
        return spans
    def apply_drop_to_seq(self, seq):
        """ Byte-dropout. Drop a certain number of bytes in the sequence.
        From https://arxiv.org/abs/1512.00103
        """
        out_seq = []
        for byt in seq:
            # just some sanity checking: the dropout symbol must never
            # appear in real input bytes
            assert byt != self.byte_drop_idx
            r = np.random.rand()
            if r <= self.byte_drop_rate:
                out_seq.append(self.byte_drop_idx)
            else:
                out_seq.append(byt)
        # Dropout substitutes bytes in place; the length never changes.
        assert len(out_seq) == len(seq)
        return out_seq
@Registries.test_generators.register("byte_to_span")
class Byte2SpanDSLevelTestGenerator(data.Generator):
    """ Iterator for Byte2Span Model _test_ data.
    It iterates over the entire datasets worth of byte-sequences, creating
    independent segments with overlaps of window_size, to be stitched together
    for prediction.
    """
    def __init__(self, *, dataset, hp):
        data.Generator.__init__(self, dataset, hp)
        self.window_size = hp.window_size
        self.overlap = hp.test_overlap
        # At test time, consecutive windows overlap by hp.test_overlap bytes
        # so predictions can be stitched back together.
        self.stride = self.window_size - self.overlap
        # Indexer mapping output symbols (GO/STOP/positions/types) to ids.
        self.output_idxr = byte.seq2seq_indexer(self.window_size)
        complete_byte_seq, spans = self.dataset.get_dataset_as_byte_sequence_and_spans()
        self.complete_byte_seq = complete_byte_seq
        self.spans = spans
        # Window id -> ByteSegment, filled lazily by iterate().
        self.segment_map = {}
    def iterate(self):
        """ Yield (features, targets) for every overlapping test window,
        recording each segment in self.segment_map for later stitching. """
        num_steps = self.num_steps()
        total_num_bytes = len(self.complete_byte_seq)
        tf.logging.info("{} Test Segments To Be Generated!".format(num_steps))
        for i in range(num_steps):
            start = i * self.stride
            # Clamp the window end at the end of the dataset.
            end = total_num_bytes \
                if start + self.window_size > total_num_bytes \
                else start + self.window_size
            byte_of_bytes = self.complete_byte_seq[start:end]
            spans = self.get_spans_within(segment_start=start, segment_end=end)
            # Re-express each span relative to the segment start.
            segment_spans = []
            for span in spans:
                segment_spans.append(span.inside_segment_start(start))
            segment = byte.ByteSegment(_id=i,
                                       sent_id=0,
                                       wbytes=byte_of_bytes,
                                       spans=segment_spans,
                                       absolute_start=start)
            self.segment_map[i] = segment
            features = {
                Features.INPUT_SYMBOLS.value: segment.bytes,
                Features.INPUT_SEQUENCE_LENGTH.value: [len(segment.bytes)],
                Features.WINDOW_ID.value: [i],
                Features.REL_POS.value: [segment.absolute_start]
            }
            target, teacher = segment.build_gold_labels()
            # Convert symbol sequences to integer ids for the model.
            target_idxd = [self.output_idxr[t] for t in target]
            teacher_idxd = [self.output_idxr[t] for t in teacher]
            targets = {
                Features.TARGET_SEQUENCE.value: target_idxd,
                Features.TEACHER_TARGET_SEQUENCE.value: teacher_idxd,
                Features.TARGET_SEQUENCE_LENGTH.value: [len(target_idxd)]
            }
            yield features, targets
    def estimator_params(self):
        """ Return parameters for tf.Estimator class for prediction. """
        return {
            'output-voc-size': len(self.output_idxr),
            'go-idx': self.output_idxr['GO'],
            'stop-idx': self.output_idxr['STOP'],
        }
    def datashape(self):
        # Shapes of the tensors produced by iterate(); None = variable length.
        features = {
            Features.INPUT_SYMBOLS.value: [None],
            Features.INPUT_SEQUENCE_LENGTH.value: [1],
            Features.WINDOW_ID.value: 1,
            Features.REL_POS.value: 1,
        }
        targets = {
            Features.TARGET_SEQUENCE.value: [None],
            Features.TEACHER_TARGET_SEQUENCE.value: [None],
            Features.TARGET_SEQUENCE_LENGTH.value: [1]
        }
        return (features, targets)
    def datatypes(self):
        # Dtypes of the tensors produced by iterate().
        features = {
            Features.INPUT_SYMBOLS.value: tf.int64,
            Features.INPUT_SEQUENCE_LENGTH.value: tf.int64,
            Features.WINDOW_ID.value: tf.int64,
            Features.REL_POS.value: tf.int64,
        }
        targets = {
            Features.TARGET_SEQUENCE.value: tf.int64,
            Features.TEACHER_TARGET_SEQUENCE.value: tf.int64,
            Features.TARGET_SEQUENCE_LENGTH.value: tf.int64
        }
        return (features, targets)
    def get_spans_within(self, *, segment_start, segment_end):
        """ Get all spans inside a certain window """
        # Linear scan over window positions; self.spans is keyed by byte
        # position (presumably span start offsets -- TODO confirm).
        spans = []
        for i in range(segment_start, segment_end):
            if i in self.spans:
                span = self.spans[i]
                if span.contained_in(seg_start=segment_start, seg_end=segment_end):
                    spans.append(span)
        return spans
    def num_steps(self):
        """ Number of steps it takes to iterate over all segments of the
        dataset.
        """
        seq_len = len(self.complete_byte_seq)
        # Last possible window start (0 when the dataset fits in one window).
        max_start = max(seq_len - self.window_size, 0)
        if max_start % self.stride == 0:
            # Strides tile the sequence exactly: one window per stride plus
            # the window starting at max_start itself.
            num_steps = (max_start / self.stride) + 1
        else:
            # Otherwise one extra, final partial window is needed.
            num_steps = floor(max_start/self.stride) + 2
        return int(num_steps)
    def get_segment_map(self):
        """ Helper. """
        return self.segment_map
@Registries.predictors.register("byte_to_span")
class Byte2SpanPredictor(data.Predictor):
""" Inference for a Byte2Span model.
We're assuming that inference was done at a "dataset-level". I.e. we're
recieiving segments whose absolute position feature indicates that
segments absolute position within the byte sequence of the _entire_
dataset.
So, in order to do inference, this class decodes the spans from each
segment's predicted sequence, and then resolves conflicts etc. and then
decodes each word label by deciding whether or not that word falls
within a span.
This is a rather messy and complex little class. I'm sorry.
"""
    def __init__(self, dataset, hp):
        """Set up span decoding state for a dataset-level byte2span run."""
        data.Predictor.__init__(self, dataset, hp)
        # Reverse indexer: output id -> symbol string (e.g. 'GO', 'STOP',
        # position/length tokens, type tags), inverse of the train indexer.
        self.rev_idxr = byte.seq2seq_rev_indexer(
            byte.seq2seq_indexer(hp.window_size)
        )
        self.test_overlap = hp.test_overlap
        # Every span decoded from every segment, in absolute byte coordinates.
        self.all_predicted_spans = []
        # Absolute start offset -> span, after conflict resolution.
        self.filtered_predicted_spans = {}
    def gather(self, predictions):
        """ infer and store predictions.
        Predictions, in this case, should be the predictions of a byte2span
        model (sequences of span predictions, over an entire dataset)

        Three phases:
        1. decode spans from each predicted segment and shift them to
           absolute dataset byte coordinates;
        2. resolve overlapping spans (filter_predicted_spans);
        3. walk the dataset word by word and emit (word, gold_tag,
           predicted_BIO_tag) triples into self.sentence_predictions.
        """
        # Window id -> raw prediction; kept for later lookup, not used
        # further in this method.
        self.segment_prediction_map = {}
        for prediction in predictions:
            segment_id = prediction[Predictions.WINDOW_ID.value][0]
            self.segment_prediction_map[segment_id] = prediction
            p_seq_len = prediction[Predictions.LENGTH.value]
            p_seq = prediction[Predictions.TAGS.value][:p_seq_len]
            p_spans = self.get_spans_from_seq(p_seq)
            for p_span in p_spans:
                # Shift from segment-relative to absolute byte coordinates.
                self.all_predicted_spans += [
                    p_span.outside_segment_start(prediction[Predictions.REL_POS.value][0])
                ]
        tf.logging.info("{} Total spans predicted!".format(len(self.all_predicted_spans)))
        self.filter_predicted_spans()
        tf.logging.info("{} Filtered spans predicted!".format(len(self.filtered_predicted_spans)))
        cur_abs_idx = 0
        prev_tag = 'O'
        for sentence in self.dataset.get_sentences():
            sentence_predicted_tags = []
            for i, word in enumerate(sentence.word_list):
                # Word length in UTF-8 bytes, matching the byte-level input.
                word_size = len(bytes(word, encoding='utf-8'))
                word_start = cur_abs_idx
                word_end = cur_abs_idx + word_size
                word_span = self.find_span_for_word(word_start, word_end)
                if word_span is None:
                    sentence_predicted_tags.append((word,
                                                    sentence.tag_list[i],
                                                    'O'))
                    prev_tag = 'O'
                else:
                    word_type = word_span.tag
                    # NOTE(review): the -1 slack appears to tolerate spans
                    # starting one byte before the word -- confirm.
                    if word_span.start >= word_start - 1:
                        p_tag = 'B-{}'.format(word_type)
                    elif word_type == prev_tag:
                        p_tag = 'I-{}'.format(word_type)
                    else:
                        p_tag = 'B-{}'.format(word_type)
                    prev_tag = word_type
                    sentence_predicted_tags.append((word,
                                                    sentence.tag_list[i],
                                                    p_tag))
                # +1 presumably skips a separator byte between words --
                # TODO confirm against the dataset's byte layout.
                cur_abs_idx += word_size + 1
            assert len(sentence_predicted_tags) == len(sentence.word_list)
            self.sentence_predictions.append(sentence_predicted_tags)
            # Entities never continue across sentence boundaries.
            prev_tag = 'O'
        assert len(self.sentence_predictions) == len(self.dataset.get_sentences())
def find_span_for_word(self, word_abs_start, word_abs_end):
""" Find a span that a word falls in. If no span exists, return none.
To avoid searching through every span, we iterate backwards from the
word start position to find a span that starts at that position. Once
we find one (we only need to evaluate on the first span found), we
check to see if the word falls within that span."""
potential_span = None
for i in range(word_abs_start, -1, -1):
if i in self.filtered_predicted_spans:
potential_span = self.filtered_predicted_spans[i]
break
if potential_span is not None:
if word_abs_end <= potential_span.start + potential_span.length:
return potential_span
return None
    def get_spans_from_seq(self, seq):
        """ Recover predicted spans from a predicted sequence.

        The model emits spans as consecutive (start, length, type) symbol
        triples terminated by 'STOP'; this walks the sequence, validating
        each triple before accepting it, and skips ahead one symbol when a
        triple is malformed.
        """
        spans = []
        cur_idx = 0
        cur_tag = self.rev_idxr[seq[cur_idx]]
        while cur_tag != "STOP":
            # Need three symbols (start, length, type) plus room to re-read;
            # bail out near the end of the sequence.
            if cur_idx + 3 >= len(seq):
                break
            len_tag = self.rev_idxr[seq[cur_idx+1]]
            typ_tag = self.rev_idxr[seq[cur_idx+2]]
            if self.is_start(cur_tag) and self.is_length(len_tag) and self.is_type(typ_tag):
                # Well-formed triple: record the span and jump past it.
                spans.append(byte.ByteSpan(start=self.get_num(cur_tag),
                                           length=self.get_num(len_tag),
                                           tag=typ_tag))
                cur_idx += 3
                cur_tag = self.rev_idxr[seq[cur_idx]]
            else:
                # Malformed output: resynchronize by advancing one symbol.
                cur_idx += 1
                cur_tag = self.rev_idxr[seq[cur_idx]]
        return spans
def filter_predicted_spans(self):
""" Filter predicted spans to non-conflicting (overlapping) spans. """
pred_spans = sorted(self.all_predicted_spans, key=lambda x: x.start)
cur_idx = 0
while cur_idx < len(pred_spans):
cur_span = pred_spans[cur_idx]
# iterate over potential conflicts, and resolve
for i in range(cur_idx+1, len(pred_spans)):
compare_span = pred_spans[i]
# if the next span starts before current span ends, conflict
if compare_span.start < cur_span.start + cur_span.length:
# In the case of conflict, select the span which started
# earliest in it's respective segment. This heuristic comes
# from correspondence with the authors | |
+ "V")
self._add_to_listBox(self.pathList, self.edgeTuple[0])
self.pathList.select_set(6)
return self.edgeTuple[0] + "\n|Donor|\nV\n" + self.edgeTuple[1]
self._add_to_listBox(self.pathList,self.master.backs[self.finalNodeName][0].start)
for p in self.master.backs[self.finalNodeName]:
edge = self.master.scModel.getGraph().get_edge(p.start, p.end)
self.pathList.insert(END, 2 * tab + "|")
c += self._add_to_listBox(self.pathList, edge['op'])
self.pathList.insert(END, 2 * tab + "|")
self.pathList.insert(END, 2 * tab + "V")
c += 3
c += self._add_to_listBox(self.pathList, self.master.getFileNameForNode(p.end))
if self.master.getFileNameForNode(p.end) == self.edgeTuple[0]:
current = c
self.pathList.selection_set(current)
self.pathList.see(max(0,current-5))
return ""
    def load_overlay(self, initialize):
        """
        Lays out display for spatial overlay for image probes.

        Selects the probe that matches this page's edge tuple, loads the
        final (or donor) image plus its mask, overlays the mask on the
        image and draws the result on the page's canvas.

        :param initialize: when True, create the 510x510 canvas first
        :return: None
        """
        edgeTuple = self.edgeTuple
        message = 'final image'
        # A link containing '->' denotes a manipulation edge: compare the
        # probe's final node; otherwise treat the pairing as a donor link.
        if (len(self.link.split('->')) > 1):
            probe = [probe for probe in self.master.probes if
                     probe.edgeId[1] in self.master.lookup[self.edgeTuple[0]] and probe.finalNodeId in self.master.lookup[
                         self.edgeTuple[1]]][0]
            # NOTE(review): 'n' is never used and get_node() is called again
            # on the next line — looks like leftover debugging; confirm and remove.
            n = self.master.scModel.G.get_node(probe.finalNodeId)
            finalFile = os.path.join(self.master.scModel.G.dir,
                                     self.master.scModel.G.get_node(probe.finalNodeId)['file'])
            final = openImage(finalFile)
            finalResized = imageResizeRelative(final, (500, 500), final.size)
            # Resize the target mask to match; fall back to the resized final
            # image's size when the probe carries no mask image.
            imResized = imageResizeRelative(probe.targetMaskImage, (500, 500),
                                            probe.targetMaskImage.size if probe.targetMaskImage is not None else finalResized.size)
        else:
            message = 'donor'
            probe = \
                [probe for probe in self.master.probes if probe.edgeId[1] in self.master.lookup[edgeTuple[0]] and probe.donorBaseNodeId in self.master.lookup[edgeTuple[1]]][0]
            final, final_file = self.master.scModel.G.get_image(probe.donorBaseNodeId)
            finalResized = imageResizeRelative(final, (500, 500), final.size)
            imResized = imageResizeRelative(probe.donorMaskImage, (500, 500),
                                            probe.donorMaskImage.size if probe.donorMaskImage is not None else finalResized.size)
        # NOTE(review): 'edge' is also unused below — confirm whether the
        # get_edge call has side effects before removing it.
        edge = self.master.scModel.getGraph().get_edge(probe.edgeId[0],probe.edgeId[1])
        if initialize is True:
            self.c = Canvas(self.cImgFrame, width=510, height=510)
            self.c.pack()
        self.transitionString(None)
        try:
            finalResized = finalResized.overlay(imResized)
        except IndexError:
            # Mask and image dimensions disagree: show a diagnostic message
            # on the canvas instead of the overlay.
            tex = self.c.create_text(250,250,width=400,font=("Courier", 20))
            self.c.itemconfig(tex, text="The mask of link {} did not match the size of the {}.".format(self.link, message))
            return
        # Keep a reference in master.photos so Tkinter does not garbage-collect
        # the PhotoImage while it is displayed.
        self.master.photos[self.link] = ImageTk.PhotoImage(finalResized.toPIL())
        self.image_on_canvas = self.c.create_image(255, 255, image=self.master.photos[self.link], anchor=CENTER, tag='imgc')
def frameMove(self):
"""
change pages on inner display for videos
:return:
"""
if self in self.master.pageDisplays:
displays = self.master.pageDisplays[self][1]
d_index = self.cImgFrame.index('current')
displays[d_index].checkbox.grid()
for display in displays:
if display != displays[d_index]:
display.checkbox.grid_remove()
    def scrollplt(self, *args):
        """
        Handle scrolling function on temporal review graph.

        Invoked as a Tk Scrollbar command, so args is either
        ('moveto', fraction) for a drag or ('scroll', amount, ...) for
        arrow/step scrolling.
        :param args: Tk scrollbar callback arguments
        :return: None
        """
        if (args[0] == 'moveto'):
            na = self.master.pltdata[self]
            # Last row of the cached plot data holds the full-video bar;
            # columns 2 and 3 are its start/end times in milliseconds.
            end = na[-1]
            total = end[3]-end[2] + 20000
            # NOTE(review): get_children()[1] assumes the axes object is the
            # second child of the Figure — fragile across matplotlib versions;
            # confirm before upgrading matplotlib.
            curframe = self.master.subplots[self].get_children()[1].xaxis.get_view_interval()
            space = curframe[1]-curframe[0]
            # Map the scrollbar fraction onto the timeline, keeping the
            # current window width.
            total *= float(args[1])
            self.master.subplots[self].get_children()[1].xaxis.set_view_interval(total, total + space, ignore=True)
            self.master.subplots[self].canvas.draw()
        elif (args[0] == 'scroll'):
            # Step scrolling: pan the x axis by the requested number of units.
            self.master.subplots[self].get_children()[1].xaxis.pan(int(args[1]))
            self.master.subplots[self].canvas.draw()
def cache_designation(self):
"""
Cache the QA validation of probe designation.
:return:
"""
self.master.check_ok()
displays = self.master.pageDisplays[self][1] if self in self.master.pageDisplays else []
if len(displays) > 0:
validation = {'temporal': bool(displays[0].checkbox), 'spatial': bool(displays[1].checkbox) if len(displays) > 1 else False}
elegibility = [key for key in validation.keys() if validation[key] == True]
designation = '-'.join(elegibility) if len(elegibility) else 'detect'
else:
designation = self.probe.taskDesignation
self.master.qaData.set_qalink_designation(self.link, designation)
class DummyPage(Frame):
    """Minimal placeholder QA page: a caption plus a NEXT button that
    advances the hosting dialog via master.nex."""

    def __init__(self, master, labeltext=''):
        Frame.__init__(self, master=master)
        # Caption describing the page, packed above the navigation button.
        self.mainlabel = Label(self, text=labeltext)
        self.mainlabel.pack()
        self.nextButton = Button(self, text='NEXT', command=master.nex)
        self.nextButton.pack()
class SpatialReviewDisplay(Frame):
    """
    The spatial review display for video.

    A notebook tab added to the page's inner frame. It contributes a
    'Spatial Overlay Correct?' checkbox to the page and, when the probe
    carries target video segments, a button that generates/plays the
    overlay video for review.
    """
    def __init__(self, page):
        Frame.__init__(self, master=page.cImgFrame, height=500,width=50)
        page.cImgFrame.add(self, text='Spatial')
        self.dialog = self.winfo_toplevel()
        #Add Checkbox for spatial review
        # Place the new checkbox one grid cell below/right of the page's last
        # existing checkbox; fall back to row 5 / col 4 when there are none.
        checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}
        chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5
        chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4
        spatial_box_label = Label(master=page, text='Spatial Overlay Correct?', wraplength=250, justify=LEFT)
        # Pre-check the box when a designation was already recorded for this link.
        self.checkbox = Chkbox(parent=page, dialog=page.master, label=spatial_box_label, command=page.cache_designation,
                               value=page.master.qaData.get_qalink_designation(page.link) is not None)
        self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col -1)
        self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')
        self.checkbox.grid_remove() #hide for now, Will be gridded by the frameMove function
        # A link containing '->' is a manipulation edge (match on finalNodeId);
        # otherwise it pairs with a donor base node.
        if (len(page.link.split('->')) > 1):
            probe = [probe for probe in page.master.probes if
                     probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.finalNodeId in
                     page.master.lookup[page.edgeTuple[1]]][0]
        else:
            probe = \
                [probe for probe in page.master.probes if
                 probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.donorBaseNodeId in
                 page.master.lookup[
                     page.edgeTuple[1]]][0]
        if probe.targetVideoSegments is not None:
            to = os.path.join(self.dialog.scModel.get_dir(),probe.finalImageFileName)
            overlay_file = compose_overlay_name(target_file=to, link=page.link)
            # Overall reviewed time range in seconds (segments are in ms).
            total_range = (probe.targetVideoSegments[0].starttime/1000, probe.targetVideoSegments[-1].endtime/1000)
            self.buttonText = StringVar()
            # Button reads PLAY when the overlay already exists, GENERATE otherwise.
            self.buttonText.set(value=('PLAY: ' if os.path.exists(overlay_file) else 'GENERATE: ') + os.path.split(overlay_file)[1])
            self.playbutton = Button(master=self, textvariable=self.buttonText,
                                     command=lambda: self.openOverlay(probe=probe,
                                                                      target_file=to,
                                                                      overlay_path=overlay_file))
            self.playbutton.grid(row=0, column=0, columnspan=2, sticky='W')
            self.range_label = Label(master=self, text='Range: ' + '{:.2f}'.format(total_range[0]) + 's - ' + '{:.2f}'.format(total_range[1]) + 's')
            self.range_label.grid(row=0, column= 3, columnspan = 1, sticky='W')

    def openOverlay(self, probe=None, target_file = '', overlay_path=''):
        """Generate the overlay video on first use, then open it with the
        system player; the button text flips to PLAY once generated."""
        if not os.path.exists(overlay_path):
            GrayBlockOverlayGenerator(locator=self.dialog.meta_extractor.getMetaDataLocator(probe.edgeId[0]),
                                      segments=probe.targetVideoSegments,
                                      target_file=target_file, output_file=overlay_path).generate()
        self.buttonText.set('PLAY: ' + os.path.split(overlay_path)[1])
        openFile(overlay_path)
class TemporalReviewDisplay(Frame):
    """
    The temporal review display for video.

    A notebook tab that renders a matplotlib timeline of all video
    segments touching this page's target file: the full video (red),
    the segments of the current link (blue) and of other manipulations
    (green), plus a 'Temporal data correct?' checkbox on the page.
    """
    def __init__(self, page):
        Frame.__init__(self, master=page.cImgFrame)
        page.cImgFrame.add(self, text='Temporal')
        # Add Checkbox for temporal review, placed one cell below/right of the
        # page's last checkbox (defaults: row 5 / col 4 when none exist).
        checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}
        chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5
        chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4
        temporal_box_label = Label(master=page, text='Temporal data correct?', wraplength=250, justify=LEFT)
        self.checkbox = Chkbox(parent=page, dialog=page.master, label=temporal_box_label, command=page.cache_designation,
                               value=page.master.qaData.get_qalink_designation(page.link) is not None)
        self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col - 1)
        self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')
        self.checkbox.grid_remove() #hide for now, Will be gridded by the frameMove function
        # Legend swatches for the three bar colors used below.
        ps = [mpatches.Patch(color="red", label="Target Video"),
              mpatches.Patch(color="blue", label="Current Manipulations"),
              mpatches.Patch(color="green", label="Other Manipulations")]
        data = []
        f = Figure(figsize=(6, 4), dpi=100)
        subplot = f.add_subplot(111)
        subplot.legend(handles=ps, loc=8)
        prolist = []
        maxtsec = 0
        # Collect the probes relevant to this page: donor matches when there is
        # no final node, otherwise final-node matches.
        for probe in page.master.probes:
            maxtsec = max(maxtsec, probe.max_time())
            if (page.finalNodeName == None):
                if probe.donorBaseNodeId is not None and page.master.getFileNameForNode(probe.donorBaseNodeId) == \
                        page.edgeTuple[1]:
                    prolist.append(probe)
            else:
                if (page.master.getFileNameForNode(probe.finalNodeId) == page.edgeTuple[1]):
                    prolist.append(probe)
            # NOTE(review): this lookup runs on every loop iteration but only
            # the last result is kept; it also leaves 'tsec' undefined when
            # there are no probes at all — confirm whether it was meant to sit
            # outside the loop.
            try:
                tsec = get_end_time_from_segment(
                    page.master.meta_extractor.getMetaDataLocator(page.master.lookup[page.edgeTuple[1]][0]).getMaskSetForEntireVideo(
                        media_types=probe.media_types())[0]) / 1000.0
            except Exception as ex:
                # NOTE(review): ex.message is Python-2 only; under Python 3 this
                # handler itself raises AttributeError — verify target runtime.
                logging.getLogger("maskgen").error(ex.message)
                logging.getLogger("maskgen").error(
                    "{} Duration could not be found the length displayed in the graph is incorrect".format(
                        page.edgeTuple[1]))
                tsec = maxtsec
        ytics = []
        ytic_lbl = []
        count = 0
        high = 0
        low = tsec * 1000 + 20000
        # One horizontal bar row per relevant probe; column meaning of each
        # data row: [y position, color index, start ms, end ms].
        for probe in prolist:
            count += 1
            col = 2
            cur = False
            # Blue (col 1) marks segments belonging to the currently reviewed link.
            if (probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]]):
                col = 1
                cur = True
            if page.finalNodeName == None:
                for mvs in probe.donorVideoSegments if probe.donorVideoSegments is not None else []:
                    data.append([count, col, mvs.starttime, mvs.endtime])
                    if cur:
                        high = max(high, mvs.endtime)
                        low = min(low, mvs.starttime)
                        # Annotate the current link's segments with frame and time bounds.
                        subplot.text(mvs.starttime - 100, count - 0.5, "F:" + str(int(mvs.startframe)),
                                     {'size': 10})
                        subplot.text(mvs.endtime + 100, count - 0.5, "F:" + str(int(mvs.endframe)), {'size': 10})
                        subplot.text(mvs.starttime - 100, count - 0.20, "T:" + str(int(mvs.starttime)),
                                     {'size': 10})
                        subplot.text(mvs.endtime + 100, count - 0.20, "T:" + str(int(mvs.endtime)), {'size': 10})
            else:
                for mvs in probe.targetVideoSegments if probe.targetVideoSegments is not None else []:
                    data.append([count, col, mvs.starttime, mvs.endtime])
                    if cur:
                        high = max(high, mvs.endtime)
                        low = min(low, mvs.starttime)
                        subplot.text(mvs.starttime, count - 0.5, "F:" + str(int(mvs.startframe)), {'size': 10})
                        subplot.text(mvs.endtime, count - 0.5, "F:" + str(int(mvs.endframe)), {'size': 10})
                        subplot.text(mvs.starttime, count - 0.20, "T:" + str(int(mvs.starttime)), {'size': 10})
                        subplot.text(mvs.endtime, count - 0.20, "T:" + str(int(mvs.endtime)), {'size': 10})
            ytics.append(count)
            ytic_lbl.append(str(page.master.abreive(probe.edgeId[0])))
        # Map color index -> matplotlib color for the hlines call below.
        color_mapper = np.vectorize(lambda x: {0: 'red', 1: 'blue', 2: 'green'}.get(x))
        # Final row: the whole target video as a red bar spanning 0..tsec.
        data.append([count + 1, 0, 0.0, tsec * 1000.0])
        ytics.append(count + 1)
        ytic_lbl.append(page.master.abreive(page.edgeTuple[1]))
        numpy_array = np.array(data)
        subplot.hlines(numpy_array[:, 0], numpy_array[:, 2], numpy_array[:, 3], color_mapper(numpy_array[:, 1]),
                       linewidth=10)
        subplot.set_yticks(ytics)
        subplot.set_yticklabels(ytic_lbl)
        subplot.set_xlabel('Time in Milliseconds')
        subplot.grid()
        # Enforce a minimum vertical extent so few bars are still readable.
        i = subplot.yaxis.get_view_interval()
        if (i[1] - i[0] < 10):
            i[0] = i[1] - 8
            subplot.yaxis.set_view_interval(i[0], i[1])
        # Zoom the x axis to the current link's segments when the span is wide.
        i = subplot.xaxis.get_view_interval()
        if (i[1] - i[0] > 2000):
            i[0] = low - 1000
            i[1] = high + 1000
            subplot.xaxis.set_view_interval(i[0], i[1])
        # Cache plot data and figure for the scrollbar handler (scrollplt).
        page.master.pltdata[page] = numpy_array
        canvas = Canvas(self, height=50, width=50)
        imscroll = Scrollbar(self, orient=HORIZONTAL)
        imscroll.grid(row=1, column=0, sticky=EW)
        imscroll.config(command=page.scrollplt)
        fcanvas = FigureCanvasTkAgg(f, master=canvas)
        fcanvas.draw()
        fcanvas.get_tk_widget().grid(row=0, column=0)
        fcanvas._tkcanvas.grid(row=0, column=0)
        canvas.grid(row=0, column=0)
        canvas.config(height=50, width=50)
        page.master.subplots[page] = f
class QAProjectDialog(Toplevel):
"""
Host window for QA pages
"""
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]
    def __init__(self, parent):
        """Build the QA dialog: initialize per-page caches, generate probes
        for the project, and show the intro page with a status message.

        :param parent: the application window; must expose scModel and update()
        """
        self.parent = parent
        self.scModel = parent.scModel
        self.meta_extractor = MetaDataExtractor(parent.scModel.getGraph())
        self.probes = None
        Toplevel.__init__(self, parent)
        self.type = self.parent.scModel.getEndType()
        self.pages = []
        self.current_qa_page = None
        self.checkboxes = {} #Checkboxes, keyed by page
        self.backs = {}
        self.lookup = {}   # file name -> list of node ids (filled by getFileNameForNode)
        self.subplots ={}  # page -> matplotlib Figure (temporal review)
        self.pltdata = {}  # page -> numpy array of plotted segment data
        self.backsProbes={}
        self.photos = {}   # link -> PhotoImage; keeps Tk images alive
        self.commentsBoxes = {}
        self.edges = {}
        self.qaList = []
        self.pathboxes = {}
        self.qaData = maskgen.qa_logic.ValidationData(self.scModel)
        self.resizable(width=False, height=False)
        self.progressBars = []
        self.narnia = {}
        self.pageDisplays = {} #Frames that go inside pages, keyed by page.
        self.valid = False
        self.mannypage = MannyPage(self)
        self.switch_frame(self.mannypage)
        self.lastpage = None #Assigned in generate Pages
        self.pages.append(self.mannypage)
        # Probe generation may fail; getProbes leaves self.probes as None
        # in that case and the intro page reports the failure.
        self.getProbes()
        if self.probes is None:
            self.mannypage.statusLabelText.set('Probe Generation failed. Please consult logs for more details.')
            self.parent.update()
        else:
            self.errors = [p for p in self.probes if p.failure]
            if len(self.errors) > 0:
                self.mannypage.statusLabelText.set('Probes Complete with errors. Generating Preview Pages.')
            else:
                self.mannypage.statusLabelText.set('Probes Complete. Generating Preview Pages.')
            self.generate_pages()
def getProbes(self):
try:
generator = ProbeGenerator(
scModel=self.scModel,
processors=[
DetermineTaskDesignation(
scModel=self.scModel,
inputFunction=fetch_qaData_designation)])
self.probes = generator(saveTargets=False, keepFailures=True)
except Exception as e:
logging.getLogger('maskgen').error(str(e))
self.probes = None
def getFileNameForNode(self, nodeid):
try:
fn = self.scModel.getFileName(nodeid)
if fn not in self.lookup:
self.lookup[fn] = []
| |
bool:
#=======================================================================
# <auto type'> ::= 'in' '(' <types list> ')'
# | EPS
#=======================================================================
if self._current.is_IN():
self._append_syntaxic_node()
self._next_token_node()
if self._current.is_PAROP():
self._append_syntaxic_node()
self._next_token_node()
else:
self._append_error( FESyntaxErrors.AUTO_IN_PAROP )
if not self._types_list():
self._append_error( FESyntaxErrors.AUTO_IN_TYPES_LIST )
if self._current.is_PARCL():
self._append_syntaxic_node()
self._next_token_node()
else:
self._append_error( FESyntaxErrors.AUTO_IN_PARCL )
return True
#-------------------------------------------------------------------------
def _bitand_expr(self) -> bool:
#===============================================================================
# <bitand expr> ::= <shift expr> <bitand expr'>
#===============================================================================
if self._shift_expr():
self._bitand_expr1()
return True
else:
return False
#-------------------------------------------------------------------------
def _bitand_expr1(self) -> bool:
#=======================================================================
# <bitand expr'> ::= '&' <template args> <shift expr> <bitand expr'>
# | EPS
#=======================================================================
while self._current.is_BITAND():
self._append_syntaxic_node()
self._next_token_node()
self._template_args()
if not self._shift_expr():
self._append_error( FESyntaxErrors.BITAND_EXPR )
return True
#-------------------------------------------------------------------------
def _bitor_expr(self) -> bool:
#=======================================================================
# <bitor expr> ::= <bitxor expr> <bitor expr'>
#=======================================================================
if self._bitxor_expr():
self._bitor_expr1()
return True
else:
return False
#-------------------------------------------------------------------------
def _bitor_expr1(self) -> bool:
#=======================================================================
# <bitor expr'> ::= '|' <template args> <bitxor expr> <bitor expr'>
# | EPS
#=======================================================================
while self._current.is_BITOR():
self._append_syntaxic_node()
self._next_token_node()
self._template_args()
if not self._bitxor_expr():
self._append_error( FESyntaxErrors.BITOR_EXPR )
return True
#-------------------------------------------------------------------------
def _bitxor_expr(self) -> bool:
#=======================================================================
# <bitxor expr> ::= <bitand expr> <bitxor expr'>
#=======================================================================
if self._bitand_expr():
self._bitxor_expr1()
return True
else:
return False
#-------------------------------------------------------------------------
def _bitxor_expr1(self) -> bool:
#=======================================================================
# <bitxor expr'> ::= '^' <template args> <bitand expr> <bitxor expr'>
# | EPS
#=======================================================================
while self._current.is_BITXOR():
self._append_syntaxic_node()
self._next_token_node()
self._template_args()
if not self._bitand_expr():
self._append_error( FESyntaxErrors.BITXOR_EXPR )
return True
#-------------------------------------------------------------------------
def _boolean(self) -> bool:
#=======================================================================
# <boolean> ::= <TRUE> | <FALSE>
#=======================================================================
return self._true() or self._false()
#-------------------------------------------------------------------------
def _bracket_form(self) -> bool:
#=======================================================================
# <bracket form> ::= '[' <expression> <list or map form> ']'
#=======================================================================
if self._current.is_BRACKETOP():
self._append_syntaxic_node()
self._next_token_node()
if not self._expression():
self._append_error( FESyntaxErrors.BRACKET_FORM_EXPR )
if not self._list_or_map_form():
self._append_error( FESyntaxErrors.BRACKET_FORM_LIST_OR_MAP )
if self._current.is_BRACKETCL():
self._append_syntaxic_node()
self._next_token_node()
else:
self._append_error( FESyntaxErrors.BRACKET_ENDING )
return True
else:
return False
#-------------------------------------------------------------------------
def _call_operator(self) -> bool:
#=======================================================================
# <call operator> ::= '(' ')'
#=======================================================================
if self._current.is_PAROP():
self._append_syntaxic_node()
self._next_token_node()
if self._current.is_PARCL():
self._append_syntaxic_node()
self._next_token_node()
else:
self._append_error( FESyntaxErrors.CALL_OP )
return True
else:
return False
#-------------------------------------------------------------------------
def _case(self) -> bool:
#=======================================================================
# <case> ::= 'case' <expr list> <statements block>
#=======================================================================
if self._current.is_CASE():
self._append_syntaxic_node()
self._next_token_node()
if not self._expr_list():
self._append_error( FESyntaxErrors.CASE_EXPR )
if not self._statements_block():
self._append_error( FESyntaxErrors.CASE_BODY )
return True
else:
return False
#-------------------------------------------------------------------------
def _cast_op(self) -> bool:
#=======================================================================
# <cast op> ::= 'cast' <identifier>
#=======================================================================
if self._current.is_CAST():
self._append_syntaxic_node()
self._next_token_node()
if self._current.is_IDENT():
self._append_syntaxic_node()
self._next_token_node()
else:
self._append_error( FESyntaxErrors.CASTED_TYPE )
return True
else:
return False
#-------------------------------------------------------------------------
def _class_definition(self) -> bool:
#=======================================================================
# <class definition> ::= 'class' <identifier> <template def> <inheritance> <statements block>
#=======================================================================
if self._current.is_CLASS():
self._append_syntaxic_node()
self._next_token_node()
if not self._current._identifier():
self._append_error( FESyntaxErrors.CLASS_NAME )
self._template_def()
self._inheritance()
if not self._statements_block():
self._append_error( FESyntaxErrors.CLASS_BODY )
return True
else:
return False
#-------------------------------------------------------------------------
def _comment(self) -> bool:
#=======================================================================
# <comment> ::= '//' <comment'>
# | '/*' <multi lines comment>
# <comment'> ::= <any non newline char> <comment'>
# | <end line>
# | <ENDOFFILE>
#=======================================================================
if self._current.is_COMMENT() or self._current.is_COMMENT_ML(): ## (notice: previously scanned by the Scanner)
self._append_syntaxic_node()
self._next_token_node()
return True
else:
return False
#-------------------------------------------------------------------------
def _comparison(self) -> bool:
#=======================================================================
# <comparison> ::= <bitor expr> <comparison'>
#=======================================================================
return self._bitor_expr() and self._comparison1()
#-------------------------------------------------------------------------
def _comparison1(self) -> bool:
#===============================================================================
# <comparison'> ::= <comp operator> <template args> <bitor expr> <comparison'>
# | <comp operator'> <spaced template args> <bitor expr> <comparison'>
# | EPS
#===============================================================================
while True:
if self._comp_operator():
self._template_args()
if not self._bitor_expr():
self._append_error( FESyntaxErrors.COMP_EXPR )
elif self._comp_operator1():
self._spaced_template_args()
if not self._bitor_expr():
self._append_error( FESyntaxErrors.COMP_EXPR )
else:
break
return True
#-------------------------------------------------------------------------
def _comp_operator(self) -> bool:
#=======================================================================
# <comp operator> ::= '<=' | '==' | '!=' | '>='
# | 'in'
# | <is operator>
# | 'not' 'in'
#=======================================================================
if self._current.is_LE() or \
self._current.is_EQ() or \
self._current.is_NE() or \
self._current.is_GE() or \
self._current.is_IN():
self._append_syntaxic_node()
self._next_token_node()
return True
elif self._current.is_NOT():
self._append_syntaxic_node()
self._next_token_node()
if self._current.is_IN():
self._append_syntaxic_node()
self._next_token_node()
else:
self._append_error( FESyntaxErrors.NOT_IN )
return True
else:
return self._is_operator()
#-------------------------------------------------------------------------
def _comp_operator1(self) -> bool:
#=======================================================================
# <comp operator'> ::= '<' | '>' | '<=>'
#=======================================================================
if self._current.is_LT() or self._current.is_GT() or self._current.is_LEG():
self._append_syntaxic_node()
self._next_token_node()
return True
else:
return False
#-------------------------------------------------------------------------
def _compound_statement(self) -> bool:
#=======================================================================
# <compound statement> ::= <assign decl def func-call statement>
# | <embed statement>
# | <exclude statement>
# | <for statement>
# | <forever statement>
# | <if statement>
# | <repeat statement>
# | <switch statement>
# | <try statement>
# | <while statement>
# | <with statement>
#=======================================================================
return self._assign_decl_def_funccall_statement() or \
self._embed_statement() or \
self._exclude_statement() or \
self._for_statement() or \
self._forever_statement() or \
self._if_statement() or \
self._repeat_statement() or \
self._switch_statement() or \
self._try_statement() or \
self._while_statement() or \
self._with_statement()
#-------------------------------------------------------------------------
def _condition(self) -> bool:
#=======================================================================
# <condition> ::= <or test> <condition'>
#=======================================================================
return self._or_test() and self._condition1()
#-------------------------------------------------------------------------
def _condition1(self) -> bool:
#=======================================================================
# <condition'> ::= 'if' <or test> <condition">
# | EPS
#=======================================================================
if self._current.is_IF():
self._append_syntaxic_node()
self._next_token_node()
if not self._or_test():
self._append_error( FESyntaxErrors.IF_COND )
return self._condition2()
else:
return True
#-------------------------------------------------------------------------
def _condition2(self) -> bool:
#===========================================================================
# <condition"> ::= 'else' <expression>
# | 'otherwise' <expression>
#===========================================================================
if self._current.is_ELSE() or self._current.is_OTHERWISE():
self._append_syntaxic_node()
self._next_token_node()
if not self._expression():
self._append_error( FESyntaxErrors.IF_ELSE_EXPR if self._current.is_ELSE() \
else FESyntaxErrors.IF_OTHERWISE_EXPR )
return True
else:
self._append_error( FESyntaxErrors.IF_ELSE )
return True
#-------------------------------------------------------------------------
def _condition_or_unnamed_func(self) -> bool:
#=======================================================================
# <condition or unnamed func> ::= <or test>
# | <unnamed function>
#=======================================================================
return self._or_test() or self._unnamed_function()
#-------------------------------------------------------------------------
def _const_qualifier(self) -> bool:
#=======================================================================
# <const qualifier> ::= "const"
#=======================================================================
if self._current.is_CONST():
self._append_syntaxic_node()
self._next_token_node()
return True
else:
return False
#-------------------------------------------------------------------------
    def _contained_type(self) -> bool:
        #=======================================================================
        # <contained type> ::= <declared contained type>
        #                    | EPS
        #=======================================================================
        # The EPS alternative makes this rule always succeed; the declared
        # form is consumed when present, otherwise nothing is consumed.
        self._declared_contained_type()    ## (notice: returned value doesn't matter)
        return True
#-------------------------------------------------------------------------
def _container_type(self) -> bool:
#=======================================================================
# <container type> ::= <array_type>
# | <enum type>
# | <list type>
# | <map type>
# | <set type>
#=======================================================================
return self._array_type() or \
self.enum_type() or \
self._list_type() or \
self._map_type() or \
self._set_type()
#-------------------------------------------------------------------------
def _decl_constructor_or_decl_end(self) -> bool:
#=======================================================================
# <decl constructor or decl end> ::= <dotted name'> <decl or def statement'''>
# | <function definition'>
#=======================================================================
if self._dotted_name1():
if not self._decl_or_def_statement3():
self._append_error( FESyntaxErrors.DECL_DEF_IDENT_OP )
return True
else:
return self._function_definition1()
#-------------------------------------------------------------------------
def _decl_or_def_statement(self) -> bool:
#=======================================================================
# <decl or def statement> ::= <static qualifier> <decl or def statement'>
# | <class definition>
# | <decl or def statement'>
# | <forward decl>
#=======================================================================
if self._static_qualifier():
if not self._decl_or_def_statement1():
self._append_error( FESyntaxErrors.STATIC_DECL_DEF )
else:
return self._class_definition() or \
self._decl_or_def_statement1() or \
self._forward_decl()
#-------------------------------------------------------------------------
    def _decl_or_def_statement1(self) -> bool:
        #=======================================================================
        # <decl or def statement'> ::= <abstract or final qualif> <method or operator definition>
        #                            | <volatile qualifier> <TYPE> <identifier> <memory address> <simple statement end>
        #                            | <type alias> <simple statement end>
        #                            | <decl or def statement''>
        #=======================================================================
        if self._abstract_or_final_qualif():
            if not self._method_or_operator_definition():
                self._append_error( FESyntaxErrors.ABSTRACT_DEF )
            return True
        elif self._final_qualifier():
            if not self._method_or_operator_definition():
                self._append_error( FESyntaxErrors.FINAL_DEF )
            return True
        elif self._volatile_qualifier():
            if not self._TYPE():
                self._append_error( FESyntaxErrors.VOLATILE_TYPE )
            if not self._identifier():
                self._append_error( FESyntaxErrors.VAR_NAME )
                # NOTE(review): this early return skips the <memory address>
                # and <simple statement end> checks, unlike every other error
                # branch in this rule which records the error and continues —
                # confirm the early bail-out is intended error recovery.
                return True
            if not self._memory_address():
                self._append_error( FESyntaxErrors.VOLATILE_MEM_KW )
            if not self._simple_statement_end():
                self._append_error( FESyntaxErrors.STATEMENT_END )
            return True
        elif self._type_alias():
            if not self._simple_statement_end():
                self._append_error( FESyntaxErrors.STATEMENT_END )
            return True
        else:
            return self._decl_or_def_statement2()
#-------------------------------------------------------------------------
def _decl_or_def_statement2(self) -> bool:
#=======================================================================
# <decl or def statement''> ::= <TYPE'> <decl or def statement'''>
# | <enum definition>
# | <identifier> <decl constructor or decl end>
#=======================================================================
if self._TYPE1():
if not self._decl_or_def_statement3():
self._append_error( FESyntaxErrors.OP_IDENT_DECL_DEF )
return True
elif self._identifier():
if not self._decl_constructor_or_decl_end():
self._append_error( FESyntaxErrors.DECL_DEF_TYPE )
return True
elif self._enum_definition():
return True
else:
self._append_error( FESyntaxErrors.VAR_TYPE )
return False
#-------------------------------------------------------------------------
def _decl_or_def_statement3(self) -> bool:
#=======================================================================
# <decl or def statement'''> ::= <identifier> <decl or def statement''''>
# | <operator definition>
#=======================================================================
if self._identifier():
return self._decl_or_def_statement4()
else:
return self._operator_definition()
#-------------------------------------------------------------------------
    def _decl_or_def_statement4(self) -> bool:
        #=======================================================================
        # <decl or def statement''''> ::= <function definition>
        #                               | <var declaration or assignment> <simple statement end>
        #=======================================================================
        # NOTE(review): the grammar names <function definition> but the code
        # calls _function_declaration() — confirm which of the two rules is
        # actually meant here.
        if self._function_declaration():
            return True
        elif self._var_declaration_or_assignment():
            if not self._simple_statement_end():
                self._append_error( FESyntaxErrors.STATEMENT_END )
            return True
        else:
            return False
#-------------------------------------------------------------------------
def _declared_contained_type(self) -> bool:
#=======================================================================
# <declared contained type> ::= '<' <TYPE> '>'
#=======================================================================
if self._current.is_LT():
self._append_syntaxic_node()
self._next_token_node()
if not self._TYPE():
self._append_error( FESyntaxErrors.CONTAINED_TYPE )
if self._current.is_GT():
self._append_syntaxic_node()
self._next_token_node()
else:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.