gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import logging
import json
import os
import argparse
import csv
import hashlib
import sys
try:
from mapping_classes import InputClass
except ImportError:
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir, "src")))
from mapping_classes import InputClass
from mapping_classes import OutputClassCSVRealization, InputOutputMapperDirectory, OutputClassDirectory, \
CoderMapperJSONClass, TransformMapper, FunctionMapper, FilterHasKeyValueMapper, ChainMapper, CascadeKeyMapper, \
CascadeMapper, KeyTranslator, PassThroughFunctionMapper, CodeMapperDictClass, CodeMapperDictClass, ConstantMapper, \
ReplacementMapper, MapperClass
from prepared_source_classes import SourcePersonObject, SourceCareSiteObject, SourceEncounterObject, \
SourceObservationPeriodObject, SourceEncounterCoverageObject, SourceResultObject, SourceConditionObject, \
SourceProcedureObject, SourceMedicationObject, SourceLocationObject, SourceEncounterDetailObject
from source_to_cdm_functions import generate_mapper_obj, IntFloatMapper
from utility_functions import generate_observation_period
from prepared_source_functions import build_name_lookup_csv, build_key_func_dict
logging.basicConfig(level=logging.INFO)
class PopulationDemographics(InputClass):
def fields(self):
return ["empiPersonId", "gender_code", "gender_code_oid", "gender_code_text", "birthsex_code", "birthsex_code_oid",
"birthsex_code_text", "birthdate", "dateofdeath", "zip_code", "race_code", "race_code_oid", "race_code_text",
"ethnicity_code", "ethnicity_code_oid", "ethnicity_code_text", "active"]
class PopulationEncounter(InputClass):
def fields(self):
return ["encounterid", "empiPersonId", "hospitalizationstartdate", "readmission", "dischargedate", "servicedate",
"financialclass_code", "financialclass_code_oid", "financialclass_code_text", "hospitalservice_code",
"hospitalservice_code_oid", "hospitalservice_code_text", "classfication_code", "classification_code_oid",
"classification_code_text", "type_code", "type_code_oid", "type_code_text", "dischargedisposition_code",
"dischargedisposition_code_oid", "dischargedisposition_code_text", "dischargetolocation_code",
"dischargetolocation_code_oid", "dischargetolocation_code_text", "admissionsource_code",
"admissionsource_code_oid", "admissionsource_code_text", "admissiontype_code", "admissiontype_code_oid",
"admissiontype_code_text", "status_code", "status_code_oid", "status_code_text", "estimatedarrivaldate",
"estimateddeparturedate", "actualarrivaldate", "source", "active"]
class PopulationCondition(InputClass):
def fields(self):
return ["conditionid", "empiPersonId", "encounterid", "condition_code", "condition_code_oid", "condition_code_text",
"effectiveDate", "billingrank", "presentonadmission_code", "presentonadmission_code_oid",
"presentonadmission_text", "type_primary_code", "type_primary_code_oid", "type_primary_text",
"source"]
class PopulationProcedure(InputClass):
def fields(self):
return ["procedureid", "empiPersonId", "encounterid", "procedure_code", "procedure_code_oid",
"procedure_code_display", "modifier_code", "modifier_oid", "modifier_text", "servicestartdate",
"serviceenddate", "status_code", "status_oid", "active"]
class PopulationMedication(InputClass):
def fields(self):
return ["medicationid", "encounterid", "empiPersonId", "intendeddispenser", "startdate", "stopdate", "doseunit_code",
"doseunit_code_oid", "doseunit_code_text", "category_id", "category_code_oid", "category_code_text",
"frequency_id", "frequency_code_oid", "frequency_code_text", "status_code", "status_code_oid",
"status_code_text", "route_code", "route_code_oid", "route_code_text", "drug_code", "drug_code_oid",
"drug_code_text", "dosequantity", "source"]
class PopulationResult(InputClass):
def fields(self):
return ["resultid", "encounterid", "empiPersonId", "result_code", "result_code_oid", "result_code_text",
"result_type", "servicedate", "value_text", "value_numeric", "value_numeric_modifier", "unit_code",
"unit_code_oid", "unit_code_text", "value_codified_code", "value_codified_code_oid",
"value_codified_code_text", "date", "interpretation_code", "interpretation_code_oid",
"interpretation_code_text", "specimen_type_code", "specimen_type_code_oid", "specimen_type_code_text",
"bodysite_code", "bodysite_code_oid", "bodysite_code_text", "specimen_collection_date",
"specimen_received_date", "measurementmethod_code", "measurementmethod_code_oid",
"measurementmethod_code_text", "recordertype", "issueddate", "year"]
class PopulationObservationPeriod(InputClass):
def fields(self):
return []
class PopulationCareSite(InputClass):
def fields(self):
return []
class AddressLookup(InputClass):
def fields(self):
return []
class PopulationEncounterLocation(InputClass):
def fields(self):
return []
class DuplicateExcludeMapper(MapperClass):
"""Indicates that a row is a duplicate"""
def __init__(self, id_field):
self.id_field = id_field
self.id_dict = {"i_exclude": ""}
def map(self, input_dict):
if self.id_field in input_dict:
id_value = input_dict[self.id_field]
if id_value in self.id_dict:
return {"i_exclude": 1}
else:
self.id_dict[id_value] = 1
return {"i_exclude": ""}
else:
return {}
def main(input_csv_directory, output_csv_directory, file_name_dict):
output_class_obj = OutputClassDirectory()
in_out_map_obj = InputOutputMapperDirectory()
location_lookup_csv = os.path.join(input_csv_directory, "address_lookup.csv")
address_csv = os.path.join(input_csv_directory, "population_address.csv")
md5_func = lambda x: hashlib.md5(x.encode("utf8")).hexdigest()
source_location_csv = os.path.join(output_csv_directory, "source_location.csv")
key_location_mapper = build_name_lookup_csv(address_csv, location_lookup_csv,
["street_1", "street_2", "city", "state", "zip_code"],
["street_1", "street_2", "city", "state", "zip_code"], hashing_func=md5_func)
key_address_name_mapper = FunctionMapper(
build_key_func_dict(["street_1", "street_2", "city", "state", "zip_code"], separator="|"))
# k_location,s_address_1,s_address_2,s_city,s_state,s_zip,s_county,s_location_name
location_rules = [("key_name", "k_location"),
(("street_1", "street_2", "city", "state",
"zip_code"),
key_address_name_mapper,
{"mapped_value": "s_location_name"}),
("street_1", "s_address_1"),
("street_2", "s_address_2"),
("city", "s_city"),
("state", "s_state"),
("zip_code", "s_zip")
]
location_runner_obj = generate_mapper_obj(location_lookup_csv, AddressLookup(), source_location_csv,
SourceLocationObject(), location_rules,
output_class_obj, in_out_map_obj)
location_runner_obj.run()
input_patient_file_name = os.path.join(input_csv_directory, file_name_dict["demographic"])
# Source: https://www.hl7.org/fhir/v3/Race/cs.html
hl7_race_dict = {
"1002-5": "American Indian or Alaska Native",
"1004-1": "American Indian",
"1006-6": "Abenaki",
"1008-2": "Algonquian",
"1010-8": "Apache",
"1011-6": "Chiricahua",
"1012-4": "Fort Sill Apache",
"1013-2": "Jicarilla Apache",
"1014-0": "Lipan Apache",
"1015-7": "Mescalero Apache",
"1016-5": "Oklahoma Apache",
"1017-3": "Payson Apache",
"1018-1": "San Carlos Apache",
"1019-9": "White Mountain Apache",
"1021-5": "Arapaho",
"1022-3": "Northern Arapaho",
"1023-1": "Southern Arapaho",
"1024-9": "Wind River Arapaho",
"1026-4": "Arikara",
"1028-0": "Assiniboine",
"1030-6": "Assiniboine Sioux",
"1031-4": "Fort Peck Assiniboine Sioux",
"1033-0": "Bannock",
"1035-5": "Blackfeet",
"1037-1": "Brotherton",
"1039-7": "Burt Lake Band",
"1041-3": "Caddo",
"1042-1": "Oklahoma Cado",
"1044-7": "Cahuilla",
"1045-4": "Agua Caliente Cahuilla",
"1046-2": "Augustine",
"1047-0": "Cabazon",
"1048-8": "Los Coyotes",
"1049-6": "Morongo",
"1050-4": "Santa Rosa Cahuilla",
"1051-2": "Torres-Martinez",
"1053-8": "California Tribes",
"1054-6": "Cahto",
"1055-3": "Chimariko",
"1056-1": "Coast Miwok",
"1057-9": "Digger",
"1058-7": "Kawaiisu",
"1059-5": "Kern River",
"1060-3": "Mattole",
"1061-1": "Red Wood",
"1062-9": "Santa Rosa",
"1063-7": "Takelma",
"1064-5": "Wappo",
"1065-2": "Yana",
"1066-0": "Yuki",
"1068-6": "Canadian and Latin American Indian",
"1069-4": "Canadian Indian",
"1070-2": "Central American Indian",
"1071-0": "French American Indian",
"1072-8": "Mexican American Indian",
"1073-6": "South American Indian",
"1074-4": "Spanish American Indian",
"1076-9": "Catawba",
"1741-8": "Alatna",
"1742-6": "Alexander",
"1743-4": "Allakaket",
"1744-2": "Alanvik",
"1745-9": "Anvik",
"1746-7": "Arctic",
"1747-5": "Beaver",
"1748-3": "Birch Creek",
"1749-1": "Cantwell",
"1750-9": "Chalkyitsik",
"1751-7": "Chickaloon",
"1752-5": "Chistochina",
"1753-3": "Chitina",
"1754-1": "Circle",
"1755-8": "Cook Inlet",
"1756-6": "Copper Center",
"1757-4": "Copper River",
"1758-2": "Dot Lake",
"1759-0": "Doyon",
"1760-8": "Eagle",
"1761-6": "Eklutna",
"1762-4": "Evansville",
"1763-2": "Fort Yukon",
"1764-0": "Gakona",
"1765-7": "Galena",
"1766-5": "Grayling",
"1767-3": "Gulkana",
"1768-1": "Healy Lake",
"1769-9": "Holy Cross",
"1770-7": "Hughes",
"1771-5": "Huslia",
"1772-3": "Iliamna",
"1773-1": "Kaltag",
"1774-9": "Kluti Kaah",
"1775-6": "Knik",
"1776-4": "Koyukuk",
"1777-2": "Lake Minchumina",
"1778-0": "Lime",
"1779-8": "Mcgrath",
"1780-6": "Manley Hot Springs",
"1781-4": "Mentasta Lake",
"1782-2": "Minto",
"1783-0": "Nenana",
"1784-8": "Nikolai",
"1785-5": "Ninilchik",
"1786-3": "Nondalton",
"1787-1": "Northway",
"1788-9": "Nulato",
"1789-7": "Pedro Bay",
"1790-5": "Rampart",
"1791-3": "Ruby",
"1792-1": "Salamatof",
"1793-9": "Seldovia",
"1794-7": "Slana",
"1795-4": "Shageluk",
"1796-2": "Stevens",
"1797-0": "Stony River",
"1798-8": "Takotna",
"1799-6": "Tanacross",
"1800-2": "Tanaina",
"1801-0": "Tanana",
"1802-8": "Tanana Chiefs",
"1803-6": "Tazlina",
"1804-4": "Telida",
"1805-1": "Tetlin",
"1806-9": "Tok",
"1807-7": "Tyonek",
"1808-5": "Venetie",
"1809-3": "Wiseman",
"1078-5": "Cayuse",
"1080-1": "Chehalis",
"1082-7": "Chemakuan",
"1083-5": "Hoh",
"1084-3": "Quileute",
"1086-8": "Chemehuevi",
"1088-4": "Cherokee",
"1089-2": "Cherokee Alabama",
"1090-0": "Cherokees of Northeast Alabama",
"1091-8": "Cherokees of Southeast Alabama",
"1092-6": "Eastern Cherokee",
"1093-4": "Echota Cherokee",
"1094-2": "Etowah Cherokee",
"1095-9": "Northern Cherokee",
"1096-7": "Tuscola",
"1097-5": "United Keetowah Band of Cherokee",
"1098-3": "Western Cherokee",
"1100-7": "Cherokee Shawnee",
"1102-3": "Cheyenne",
"1103-1": "Northern Cheyenne",
"1104-9": "Southern Cheyenne",
"1106-4": "Cheyenne-Arapaho",
"1108-0": "Chickahominy",
"1109-8": "Eastern Chickahominy",
"1110-6": "Western Chickahominy",
"1112-2": "Chickasaw",
"1114-8": "Chinook",
"1115-5": "Clatsop",
"1116-3": "Columbia River Chinook",
"1117-1": "Kathlamet",
"1118-9": "Upper Chinook",
"1119-7": "Wakiakum Chinook",
"1120-5": "Willapa Chinook",
"1121-3": "Wishram",
"1123-9": "Chippewa",
"1124-7": "Bad River",
"1125-4": "Bay Mills Chippewa",
"1126-2": "Bois Forte",
"1127-0": "Burt Lake Chippewa",
"1128-8": "Fond du Lac",
"1129-6": "Grand Portage",
"1130-4": "Grand Traverse Band of Ottawa-Chippewa",
"1131-2": "Keweenaw",
"1132-0": "Lac Courte Oreilles",
"1133-8": "Lac du Flambeau",
"1134-6": "Lac Vieux Desert Chippewa",
"1135-3": "Lake Superior",
"1136-1": "Leech Lake",
"1137-9": "Little Shell Chippewa",
"1138-7": "Mille Lacs",
"1139-5": "Minnesota Chippewa",
"1140-3": "Ontonagon",
"1141-1": "Red Cliff Chippewa",
"1142-9": "Red Lake Chippewa",
"1143-7": "Saginaw Chippewa",
"1144-5": "St. Croix Chippewa",
"1145-2": "Sault Ste. Marie Chippewa",
"1146-0": "Sokoagon Chippewa",
"1147-8": "Turtle Mountain",
"1148-6": "White Earth",
"1150-2": "Chippewa Cree",
"1151-0": "Rocky Boy's Chippewa Cree",
"1153-6": "Chitimacha",
"1155-1": "Choctaw",
"1156-9": "Clifton Choctaw",
"1157-7": "Jena Choctaw",
"1158-5": "Mississippi Choctaw",
"1159-3": "Mowa Band of Choctaw",
"1160-1": "Oklahoma Choctaw",
"1162-7": "Chumash",
"1163-5": "Santa Ynez",
"1165-0": "Clear Lake",
"1167-6": "Coeur D'Alene",
"1169-2": "Coharie",
"1171-8": "Colorado River",
"1173-4": "Colville",
"1175-9": "Comanche",
"1176-7": "Oklahoma Comanche",
"1178-3": "Coos, Lower Umpqua, Siuslaw",
"1180-9": "Coos",
"1182-5": "Coquilles",
"1184-1": "Costanoan",
"1186-6": "Coushatta",
"1187-4": "Alabama Coushatta",
"1189-0": "Cowlitz",
"1191-6": "Cree",
"1193-2": "Creek",
"1194-0": "Alabama Creek",
"1195-7": "Alabama Quassarte",
"1196-5": "Eastern Creek",
"1197-3": "Eastern Muscogee",
"1198-1": "Kialegee",
"1199-9": "Lower Muscogee",
"1200-5": "Machis Lower Creek Indian",
"1201-3": "Poarch Band",
"1202-1": "Principal Creek Indian Nation",
"1203-9": "Star Clan of Muscogee Creeks",
"1204-7": "Thlopthlocco",
"1205-4": "Tuckabachee",
"1207-0": "Croatan",
"1209-6": "Crow",
"1211-2": "Cupeno",
"1212-0": "Agua Caliente",
"1214-6": "Delaware",
"1215-3": "Eastern Delaware",
"1216-1": "Lenni-Lenape",
"1217-9": "Munsee",
"1218-7": "Oklahoma Delaware",
"1219-5": "Rampough Mountain",
"1220-3": "Sand Hill",
"1222-9": "Diegueno",
"1223-7": "Campo",
"1224-5": "Capitan Grande",
"1225-2": "Cuyapaipe",
"1226-0": "La Posta",
"1227-8": "Manzanita",
"1228-6": "Mesa Grande",
"1229-4": "San Pasqual",
"1230-2": "Santa Ysabel",
"1231-0": "Sycuan",
"1233-6": "Eastern Tribes",
"1234-4": "Attacapa",
"1235-1": "Biloxi",
"1236-9": "Georgetown",
"1237-7": "Moor",
"1238-5": "Nansemond",
"1239-3": "Natchez",
"1240-1": "Nausu Waiwash",
"1241-9": "Nipmuc",
"1242-7": "Paugussett",
"1243-5": "Pocomoke Acohonock",
"1244-3": "Southeastern Indians",
"1245-0": "Susquehanock",
"1246-8": "Tunica Biloxi",
"1247-6": "Waccamaw-Siousan",
"1248-4": "Wicomico",
"1250-0": "Esselen",
"1252-6": "Fort Belknap",
"1254-2": "Fort Berthold",
"1256-7": "Fort Mcdowell",
"1258-3": "Fort Hall",
"1260-9": "Gabrieleno",
"1262-5": "Grand Ronde",
"1264-1": "Gros Ventres",
"1265-8": "Atsina",
"1267-4": "Haliwa",
"1269-0": "Hidatsa",
"1271-6": "Hoopa",
"1272-4": "Trinity",
"1273-2": "Whilkut",
"1275-7": "Hoopa Extension",
"1277-3": "Houma",
"1279-9": "Inaja-Cosmit",
"1281-5": "Iowa",
"1282-3": "Iowa of Kansas-Nebraska",
"1283-1": "Iowa of Oklahoma",
"1285-6": "Iroquois",
"1286-4": "Cayuga",
"1287-2": "Mohawk",
"1288-0": "Oneida",
"1289-8": "Onondaga",
"1290-6": "Seneca",
"1291-4": "Seneca Nation",
"1292-2": "Seneca-Cayuga",
"1293-0": "Tonawanda Seneca",
"1294-8": "Tuscarora",
"1295-5": "Wyandotte",
"1297-1": "Juaneno",
"1299-7": "Kalispel",
"1301-1": "Karuk",
"1303-7": "Kaw",
"1305-2": "Kickapoo",
"1306-0": "Oklahoma Kickapoo",
"1307-8": "Texas Kickapoo",
"1309-4": "Kiowa",
"1310-2": "Oklahoma Kiowa",
"1312-8": "Klallam",
"1313-6": "Jamestown",
"1314-4": "Lower Elwha",
"1315-1": "Port Gamble Klallam",
"1317-7": "Klamath",
"1319-3": "Konkow",
"1321-9": "Kootenai",
"1323-5": "Lassik",
"1325-0": "Long Island",
"1326-8": "Matinecock",
"1327-6": "Montauk",
"1328-4": "Poospatuck",
"1329-2": "Setauket",
"1331-8": "Luiseno",
"1332-6": "La Jolla",
"1333-4": "Pala",
"1334-2": "Pauma",
"1335-9": "Pechanga",
"1336-7": "Soboba",
"1337-5": "Twenty-Nine Palms",
"1338-3": "Temecula",
"1340-9": "Lumbee",
"1342-5": "Lummi",
"1344-1": "Maidu",
"1345-8": "Mountain Maidu",
"1346-6": "Nishinam",
"1348-2": "Makah",
"1350-8": "Maliseet",
"1352-4": "Mandan",
"1354-0": "Mattaponi",
"1356-5": "Menominee",
"1358-1": "Miami",
"1359-9": "Illinois Miami",
"1360-7": "Indiana Miami",
"1361-5": "Oklahoma Miami",
"1363-1": "Miccosukee",
"1365-6": "Micmac",
"1366-4": "Aroostook",
"1368-0": "Mission Indians",
"1370-6": "Miwok",
"1372-2": "Modoc",
"1374-8": "Mohegan",
"1376-3": "Mono",
"1378-9": "Nanticoke",
"1380-5": "Narragansett",
"1382-1": "Navajo",
"1383-9": "Alamo Navajo",
"1384-7": "Canoncito Navajo",
"1385-4": "Ramah Navajo",
"1387-0": "Nez Perce",
"1389-6": "Nomalaki",
"1391-2": "Northwest Tribes",
"1392-0": "Alsea",
"1393-8": "Celilo",
"1394-6": "Columbia",
"1395-3": "Kalapuya",
"1396-1": "Molala",
"1397-9": "Talakamish",
"1398-7": "Tenino",
"1399-5": "Tillamook",
"1400-1": "Wenatchee",
"1401-9": "Yahooskin",
"1403-5": "Omaha",
"1405-0": "Oregon Athabaskan",
"1407-6": "Osage",
"1409-2": "Otoe-Missouria",
"1411-8": "Ottawa",
"1412-6": "Burt Lake Ottawa",
"1413-4": "Michigan Ottawa",
"1414-2": "Oklahoma Ottawa",
"1416-7": "Paiute",
"1417-5": "Bishop",
"1418-3": "Bridgeport",
"1419-1": "Burns Paiute",
"1420-9": "Cedarville",
"1421-7": "Fort Bidwell",
"1422-5": "Fort Independence",
"1423-3": "Kaibab",
"1424-1": "Las Vegas",
"1425-8": "Lone Pine",
"1426-6": "Lovelock",
"1427-4": "Malheur Paiute",
"1428-2": "Moapa",
"1429-0": "Northern Paiute",
"1430-8": "Owens Valley",
"1431-6": "Pyramid Lake",
"1432-4": "San Juan Southern Paiute",
"1433-2": "Southern Paiute",
"1434-0": "Summit Lake",
"1435-7": "Utu Utu Gwaitu Paiute",
"1436-5": "Walker River",
"1437-3": "Yerington Paiute",
"1439-9": "Pamunkey",
"1441-5": "Passamaquoddy",
"1442-3": "Indian Township",
"1443-1": "Pleasant Point Passamaquoddy",
"1445-6": "Pawnee",
"1446-4": "Oklahoma Pawnee",
"1448-0": "Penobscot",
"1450-6": "Peoria",
"1451-4": "Oklahoma Peoria",
"1453-0": "Pequot",
"1454-8": "Marshantucket Pequot",
"1456-3": "Pima",
"1457-1": "Gila River Pima-Maricopa",
"1458-9": "Salt River Pima-Maricopa",
"1460-5": "Piscataway",
"1462-1": "Pit River",
"1464-7": "Pomo",
"1465-4": "Central Pomo",
"1466-2": "Dry Creek",
"1467-0": "Eastern Pomo",
"1468-8": "Kashia",
"1469-6": "Northern Pomo",
"1470-4": "Scotts Valley",
"1471-2": "Stonyford",
"1472-0": "Sulphur Bank",
"1474-6": "Ponca",
"1475-3": "Nebraska Ponca",
"1476-1": "Oklahoma Ponca",
"1478-7": "Potawatomi",
"1479-5": "Citizen Band Potawatomi",
"1480-3": "Forest County",
"1481-1": "Hannahville",
"1482-9": "Huron Potawatomi",
"1483-7": "Pokagon Potawatomi",
"1484-5": "Prairie Band",
"1485-2": "Wisconsin Potawatomi",
"1487-8": "Powhatan",
"1489-4": "Pueblo",
"1490-2": "Acoma",
"1491-0": "Arizona Tewa",
"1492-8": "Cochiti",
"1493-6": "Hopi",
"1494-4": "Isleta",
"1495-1": "Jemez",
"1496-9": "Keres",
"1497-7": "Laguna",
"1498-5": "Nambe",
"1499-3": "Picuris",
"1500-8": "Piro",
"1501-6": "Pojoaque",
"1502-4": "San Felipe",
"1503-2": "San Ildefonso",
"1504-0": "San Juan Pueblo",
"1505-7": "San Juan De",
"1506-5": "San Juan",
"1507-3": "Sandia",
"1508-1": "Santa Ana",
"1509-9": "Santa Clara",
"1510-7": "Santo Domingo",
"1511-5": "Taos",
"1512-3": "Tesuque",
"1513-1": "Tewa",
"1514-9": "Tigua",
"1515-6": "Zia",
"1516-4": "Zuni",
"1518-0": "Puget Sound Salish",
"1519-8": "Duwamish",
"1520-6": "Kikiallus",
"1521-4": "Lower Skagit",
"1522-2": "Muckleshoot",
"1523-0": "Nisqually",
"1524-8": "Nooksack",
"1525-5": "Port Madison",
"1526-3": "Puyallup",
"1527-1": "Samish",
"1528-9": "Sauk-Suiattle",
"1529-7": "Skokomish",
"1530-5": "Skykomish",
"1531-3": "Snohomish",
"1532-1": "Snoqualmie",
"1533-9": "Squaxin Island",
"1534-7": "Steilacoom",
"1535-4": "Stillaguamish",
"1536-2": "Suquamish",
"1537-0": "Swinomish",
"1538-8": "Tulalip",
"1539-6": "Upper Skagit",
"1541-2": "Quapaw",
"1543-8": "Quinault",
"1545-3": "Rappahannock",
"1547-9": "Reno-Sparks",
"1549-5": "Round Valley",
"1551-1": "Sac and Fox",
"1552-9": "Iowa Sac and Fox",
"1553-7": "Missouri Sac and Fox",
"1554-5": "Oklahoma Sac and Fox",
"1556-0": "Salinan",
"1558-6": "Salish",
"1560-2": "Salish and Kootenai",
"1562-8": "Schaghticoke",
"1564-4": "Scott Valley",
"1566-9": "Seminole",
"1567-7": "Big Cypress",
"1568-5": "Brighton",
"1569-3": "Florida Seminole",
"1570-1": "Hollywood Seminole",
"1571-9": "Oklahoma Seminole",
"1573-5": "Serrano",
"1574-3": "San Manual",
"1576-8": "Shasta",
"1578-4": "Shawnee",
"1579-2": "Absentee Shawnee",
"1580-0": "Eastern Shawnee",
"1582-6": "Shinnecock",
"1584-2": "Shoalwater Bay",
"1586-7": "Shoshone",
"1587-5": "Battle Mountain",
"1588-3": "Duckwater",
"1589-1": "Elko",
"1590-9": "Ely",
"1591-7": "Goshute",
"1592-5": "Panamint",
"1593-3": "Ruby Valley",
"1594-1": "Skull Valley",
"1595-8": "South Fork Shoshone",
"1596-6": "Te-Moak Western Shoshone",
"1597-4": "Timbi-Sha Shoshone",
"1598-2": "Washakie",
"1599-0": "Wind River Shoshone",
"1600-6": "Yomba",
"1602-2": "Shoshone Paiute",
"1603-0": "Duck Valley",
"1604-8": "Fallon",
"1605-5": "Fort McDermitt",
"1607-1": "Siletz",
"1609-7": "Sioux",
"1610-5": "Blackfoot Sioux",
"1611-3": "Brule Sioux",
"1612-1": "Cheyenne River Sioux",
"1613-9": "Crow Creek Sioux",
"1614-7": "Dakota Sioux",
"1615-4": "Flandreau Santee",
"1616-2": "Fort Peck",
"1617-0": "Lake Traverse Sioux",
"1618-8": "Lower Brule Sioux",
"1619-6": "Lower Sioux",
"1620-4": "Mdewakanton Sioux",
"1621-2": "Miniconjou",
"1622-0": "Oglala Sioux",
"1623-8": "Pine Ridge Sioux",
"1624-6": "Pipestone Sioux",
"1625-3": "Prairie Island Sioux",
"1626-1": "Prior Lake Sioux",
"1627-9": "Rosebud Sioux",
"1628-7": "Sans Arc Sioux",
"1629-5": "Santee Sioux",
"1630-3": "Sisseton-Wahpeton",
"1631-1": "Sisseton Sioux",
"1632-9": "Spirit Lake Sioux",
"1633-7": "Standing Rock Sioux",
"1634-5": "Teton Sioux",
"1635-2": "Two Kettle Sioux",
"1636-0": "Upper Sioux",
"1637-8": "Wahpekute Sioux",
"1638-6": "Wahpeton Sioux",
"1639-4": "Wazhaza Sioux",
"1640-2": "Yankton Sioux",
"1641-0": "Yanktonai Sioux",
"1643-6": "Siuslaw",
"1645-1": "Spokane",
"1647-7": "Stewart",
"1649-3": "Stockbridge",
"1651-9": "Susanville",
"1653-5": "Tohono O'Odham",
"1654-3": "Ak-Chin",
"1655-0": "Gila Bend",
"1656-8": "San Xavier",
"1657-6": "Sells",
"1659-2": "Tolowa",
"1661-8": "Tonkawa",
"1663-4": "Tygh",
"1665-9": "Umatilla",
"1667-5": "Umpqua",
"1668-3": "Cow Creek Umpqua",
"1670-9": "Ute",
"1671-7": "Allen Canyon",
"1672-5": "Uintah Ute",
"1673-3": "Ute Mountain Ute",
"1675-8": "Wailaki",
"1677-4": "Walla-Walla",
"1679-0": "Wampanoag",
"1680-8": "Gay Head Wampanoag",
"1681-6": "Mashpee Wampanoag",
"1683-2": "Warm Springs",
"1685-7": "Wascopum",
"1687-3": "Washoe",
"1688-1": "Alpine",
"1689-9": "Carson",
"1690-7": "Dresslerville",
"1692-3": "Wichita",
"1694-9": "Wind River",
"1696-4": "Winnebago",
"1697-2": "Ho-chunk",
"1698-0": "Nebraska Winnebago",
"1700-4": "Winnemucca",
"1702-0": "Wintun",
"1704-6": "Wiyot",
"1705-3": "Table Bluff",
"1707-9": "Yakama",
"1709-5": "Yakama Cowlitz",
"1711-1": "Yaqui",
"1712-9": "Barrio Libre",
"1713-7": "Pascua Yaqui",
"1715-2": "Yavapai Apache",
"1717-8": "Yokuts",
"1718-6": "Chukchansi",
"1719-4": "Tachi",
"1720-2": "Tule River",
"1722-8": "Yuchi",
"1724-4": "Yuman",
"1725-1": "Cocopah",
"1726-9": "Havasupai",
"1727-7": "Hualapai",
"1728-5": "Maricopa",
"1729-3": "Mohave",
"1730-1": "Quechan",
"1731-9": "Yavapai",
"1732-7": "Yurok",
"1733-5": "Coast Yurok",
"1735-0": "Alaska Native",
"1737-6": "Alaska Indian",
"1739-2": "Alaskan Athabascan",
"1740-0": "Ahtna",
"1811-9": "Southeast Alaska",
"1813-5": "Tlingit-Haida",
"1814-3": "Angoon",
"1815-0": "Central Council of Tlingit and Haida Tribes",
"1816-8": "Chilkat",
"1817-6": "Chilkoot",
"1818-4": "Craig",
"1819-2": "Douglas",
"1820-0": "Haida",
"1821-8": "Hoonah",
"1822-6": "Hydaburg",
"1823-4": "Kake",
"1824-2": "Kasaan",
"1825-9": "Kenaitze",
"1826-7": "Ketchikan",
"1827-5": "Klawock",
"1828-3": "Pelican",
"1829-1": "Petersburg",
"1830-9": "Saxman",
"1831-7": "Sitka",
"1832-5": "Tenakee Springs",
"1833-3": "Tlingit",
"1834-1": "Wrangell",
"1835-8": "Yakutat",
"1837-4": "Tsimshian",
"1838-2": "Metlakatla",
"1840-8": "Eskimo",
"1842-4": "Greenland Eskimo",
"1844-0": "Inupiat Eskimo",
"1845-7": "Ambler",
"1846-5": "Anaktuvuk",
"1847-3": "Anaktuvuk Pass",
"1848-1": "Arctic Slope Inupiat",
"1849-9": "Arctic Slope Corporation",
"1850-7": "Atqasuk",
"1851-5": "Barrow",
"1852-3": "Bering Straits Inupiat",
"1853-1": "Brevig Mission",
"1854-9": "Buckland",
"1855-6": "Chinik",
"1856-4": "Council",
"1857-2": "Deering",
"1858-0": "Elim",
"1859-8": "Golovin",
"1860-6": "Inalik Diomede",
"1861-4": "Inupiaq",
"1862-2": "Kaktovik",
"1863-0": "Kawerak",
"1864-8": "Kiana",
"1865-5": "Kivalina",
"1866-3": "Kobuk",
"1867-1": "Kotzebue",
"1868-9": "Koyuk",
"1869-7": "Kwiguk",
"1870-5": "Mauneluk Inupiat",
"1871-3": "Nana Inupiat",
"1872-1": "Noatak",
"1873-9": "Nome",
"1874-7": "Noorvik",
"1875-4": "Nuiqsut",
"1876-2": "Point Hope",
"1877-0": "Point Lay",
"1878-8": "Selawik",
"1879-6": "Shaktoolik",
"1880-4": "Shishmaref",
"1881-2": "Shungnak",
"1882-0": "Solomon",
"1883-8": "Teller",
"1884-6": "Unalakleet",
"1885-3": "Wainwright",
"1886-1": "Wales",
"1887-9": "White Mountain",
"1888-7": "White Mountain Inupiat",
"1889-5": "Mary's Igloo",
"1891-1": "Siberian Eskimo",
"1892-9": "Gambell",
"1893-7": "Savoonga",
"1894-5": "Siberian Yupik",
"1896-0": "Yupik Eskimo",
"1897-8": "Akiachak",
"1898-6": "Akiak",
"1899-4": "Alakanuk",
"1900-0": "Aleknagik",
"1901-8": "Andreafsky",
"1902-6": "Aniak",
"1903-4": "Atmautluak",
"1904-2": "Bethel",
"1905-9": "Bill Moore's Slough",
"1906-7": "Bristol Bay Yupik",
"1907-5": "Calista Yupik",
"1908-3": "Chefornak",
"1909-1": "Chevak",
"1910-9": "Chuathbaluk",
"1911-7": "Clark's Point",
"1912-5": "Crooked Creek",
"1913-3": "Dillingham",
"1914-1": "Eek",
"1915-8": "Ekuk",
"1916-6": "Ekwok",
"1917-4": "Emmonak",
"1918-2": "Goodnews Bay",
"1919-0": "Hooper Bay",
"1920-8": "Iqurmuit (Russian Mission)",
"1921-6": "Kalskag",
"1922-4": "Kasigluk",
"1923-2": "Kipnuk",
"1924-0": "Koliganek",
"1925-7": "Kongiganak",
"1926-5": "Kotlik",
"1927-3": "Kwethluk",
"1928-1": "Kwigillingok",
"1929-9": "Levelock",
"1930-7": "Lower Kalskag",
"1931-5": "Manokotak",
"1932-3": "Marshall",
"1933-1": "Mekoryuk",
"1934-9": "Mountain Village",
"1935-6": "Naknek",
"1936-4": "Napaumute",
"1937-2": "Napakiak",
"1938-0": "Napaskiak",
"1939-8": "Newhalen",
"1940-6": "New Stuyahok",
"1941-4": "Newtok",
"1942-2": "Nightmute",
"1943-0": "Nunapitchukv",
"1944-8": "Oscarville",
"1945-5": "Pilot Station",
"1946-3": "Pitkas Point",
"1947-1": "Platinum",
"1948-9": "Portage Creek",
"1949-7": "Quinhagak",
"1950-5": "Red Devil",
"1951-3": "St. Michael",
"1952-1": "Scammon Bay",
"1953-9": "Sheldon's Point",
"1954-7": "Sleetmute",
"1955-4": "Stebbins",
"1956-2": "Togiak",
"1957-0": "Toksook",
"1958-8": "Tulukskak",
"1959-6": "Tuntutuliak",
"1960-4": "Tununak",
"1961-2": "Twin Hills",
"1962-0": "Georgetown",
"1963-8": "St. Mary's",
"1964-6": "Umkumiate",
"1966-1": "Aleut",
"1968-7": "Alutiiq Aleut",
"1969-5": "Tatitlek",
"1970-3": "Ugashik",
"1972-9": "Bristol Bay Aleut",
"1973-7": "Chignik",
"1974-5": "Chignik Lake",
"1975-2": "Egegik",
"1976-0": "Igiugig",
"1977-8": "Ivanof Bay",
"1978-6": "King Salmon",
"1979-4": "Kokhanok",
"1980-2": "Perryville",
"1981-0": "Pilot Point",
"1982-8": "Port Heiden",
"1984-4": "Chugach Aleut",
"1985-1": "Chenega",
"1986-9": "Chugach Corporation",
"1987-7": "English Bay",
"1988-5": "Port Graham",
"1990-1": "Eyak",
"1992-7": "Koniag Aleut",
"1993-5": "Akhiok",
"1994-3": "Agdaagux",
"1995-0": "Karluk",
"1996-8": "Kodiak",
"1997-6": "Larsen Bay",
"1998-4": "Old Harbor",
"1999-2": "Ouzinkie",
"2000-8": "Port Lions",
"2002-4": "Sugpiaq",
"2004-0": "Suqpigaq",
"2006-5": "Unangan Aleut",
"2007-3": "Akutan",
"2008-1": "Aleut Corporation",
"2009-9": "Aleutian",
"2010-7": "Aleutian Islander",
"2011-5": "Atka",
"2012-3": "Belkofski",
"2013-1": "Chignik Lagoon",
"2014-9": "King Cove",
"2015-6": "False Pass",
"2016-4": "Nelson Lagoon",
"2017-2": "Nikolski",
"2018-0": "Pauloff Harbor",
"2019-8": "Qagan Toyagungin",
"2020-6": "Qawalangin",
"2021-4": "St. George",
"2022-2": "St. Paul",
"2023-0": "Sand Point",
"2024-8": "South Naknek",
"2025-5": "Unalaska",
"2026-3": "Unga",
"2028-9": "Asian",
"2029-7": "Asian Indian",
"2030-5": "Bangladeshi",
"2031-3": "Bhutanese",
"2032-1": "Burmese",
"2033-9": "Cambodian",
"2034-7": "Chinese",
"2035-4": "Taiwanese",
"2036-2": "Filipino",
"2037-0": "Hmong",
"2038-8": "Indonesian",
"2039-6": "Japanese",
"2040-4": "Korean",
"2041-2": "Laotian",
"2042-0": "Malaysian",
"2043-8": "Okinawan",
"2044-6": "Pakistani",
"2045-3": "Sri Lankan",
"2046-1": "Thai",
"2047-9": "Vietnamese",
"2048-7": "Iwo Jiman",
"2049-5": "Maldivian",
"2050-3": "Nepalese",
"2051-1": "Singaporean",
"2052-9": "Madagascar",
"2054-5": "Black or African American",
"2056-0": "Black",
"2058-6": "African American",
"2060-2": "African",
"2061-0": "Botswanan",
"2062-8": "Ethiopian",
"2063-6": "Liberian",
"2064-4": "Namibian",
"2065-1": "Nigerian",
"2066-9": "Zairean",
"2067-7": "Bahamian",
"2068-5": "Barbadian",
"2069-3": "Dominican",
"2070-1": "Dominica Islander",
"2071-9": "Haitian",
"2072-7": "Jamaican",
"2073-5": "Tobagoan",
"2074-3": "Trinidadian",
"2075-0": "West Indian",
"2076-8": "Native Hawaiian or Other Pacific Islander",
"2078-4": "Polynesian",
"2079-2": "Native Hawaiian",
"2080-0": "Samoan",
"2081-8": "Tahitian",
"2082-6": "Tongan",
"2083-4": "Tokelauan",
"2085-9": "Micronesian",
"2086-7": "Guamanian or Chamorro",
"2087-5": "Guamanian",
"2088-3": "Chamorro",
"2089-1": "Mariana Islander",
"2090-9": "Marshallese",
"2091-7": "Palauan",
"2092-5": "Carolinian",
"2093-3": "Kosraean",
"2094-1": "Pohnpeian",
"2095-8": "Saipanese",
"2096-6": "Kiribati",
"2097-4": "Chuukese",
"2098-2": "Yapese",
"2100-6": "Melanesian",
"2101-4": "Fijian",
"2102-2": "Papua New Guinean",
"2103-0": "Solomon Islander",
"2104-8": "New Hebrides",
"2500-7": "Other Pacific Islander",
"2106-3": "White",
"2108-9": "European",
"2109-7": "Armenian",
"2110-5": "English",
"2111-3": "French",
"2112-1": "German",
"2113-9": "Irish",
"2114-7": "Italian",
"2115-4": "Polish",
"2116-2": "Scottish",
"2118-8": "Middle Eastern or North African",
"2119-6": "Assyrian",
"2120-4": "Egyptian",
"2121-2": "Iranian",
"2122-0": "Iraqi",
"2123-8": "Lebanese",
"2124-6": "Palestinian",
"2125-3": "Syrian",
"2126-1": "Afghanistani",
"2127-9": "Israeili",
"2129-5": "Arab",
"2131-1": "Other Race"
}
hl7_ethnicity_dict = {
"2135-2": "Hispanic or Latino",
"2137-8": "Spaniard",
"2138-6": "Andalusian",
"2139-4": "Asturian",
"2140-2": "Castillian",
"2141-0": "Catalonian",
"2142-8": "Belearic Islander",
"2143-6": "Gallego",
"2144-4": "Valencian",
"2145-1": "Canarian",
"2146-9": "Spanish Basque",
"2148-5": "Mexican",
"2149-3": "Mexican American",
"2150-1": "Mexicano",
"2151-9": "Chicano",
"2152-7": "La Raza",
"2153-5": "Mexican American Indian",
"2155-0": "Central American",
"2156-8": "Costa Rican",
"2157-6": "Guatemalan",
"2158-4": "Honduran",
"2159-2": "Nicaraguan",
"2160-0": "Panamanian",
"2161-8": "Salvadoran",
"2162-6": "Central American Indian",
"2163-4": "Canal Zone",
"2165-9": "South American",
"2166-7": "Argentinean",
"2167-5": "Bolivian",
"2168-3": "Chilean",
"2169-1": "Colombian",
"2170-9": "Ecuadorian",
"2171-7": "Paraguayan",
"2172-5": "Peruvian",
"2173-3": "Uruguayan",
"2174-1": "Venezuelan",
"2175-8": "South American Indian",
"2176-6": "Criollo",
"2178-2": "Latin American",
"2180-8": "Puerto Rican",
"2182-4": "Cuban",
"2184-0": "Dominican",
"2186-5": "Not Hispanic or Latino",
}
def gender_correct(input_dict):
if "gender_code" in input_dict:
gender_code = input_dict["gender_code"]
if gender_code == "248152002":
return {"gender_code": "F"}
elif gender_code == "248153007":
return {"gender_code": "M"}
else:
return input_dict
else:
return {}
person_id_duplicate_mapper = DuplicateExcludeMapper("empiPersonId")
population_patient_rules = [("empiPersonId", "s_person_id"),
("gender_code", PassThroughFunctionMapper(gender_correct), {"gender_code": "s_gender"}),
("gender_code", "m_gender"),
("birthdate", "s_birth_datetime"),
("dateofdeath", "s_death_datetime"),
("race_code", "s_race"),
("race_code", CodeMapperDictClass(hl7_race_dict, key_to_map_to="m_race"), {"m_race": "m_race"}),
("ethnicity_code", "s_ethnicity"),
("ethnicity_code", CodeMapperDictClass(hl7_ethnicity_dict, key_to_map_to="m_ethnicity"), {"m_ethnicity": "m_ethnicity"}),
(("street_1", "street_2", "city", "state", "zip_code"),
key_location_mapper, {"mapped_value": "k_location"}),
("empiPersonId", person_id_duplicate_mapper, {"i_exclude": "i_exclude"})
]
output_person_csv = os.path.join(output_csv_directory, "source_person.csv")
source_person_runner_obj = generate_mapper_obj(input_patient_file_name, PopulationDemographics(), output_person_csv,
SourcePersonObject(), population_patient_rules,
output_class_obj, in_out_map_obj)
source_person_runner_obj.run() # Run the mapper
# Care site
care_site_csv = os.path.join(input_csv_directory, "care_site.csv")
md5_func = lambda x: hashlib.md5(x.encode("utf8")).hexdigest()
population_care_site = os.path.join(input_csv_directory, "population_care_site.csv")
key_care_site_mapper = build_name_lookup_csv(population_care_site, care_site_csv,
["facility_name", "building_name",
"nurseunit_name", "hospitalservice_code_text"],
["facility_name", "building_name",
"nurseunit_name", "hospitalservice_code_text"], hashing_func=md5_func)
care_site_name_mapper = FunctionMapper(
build_key_func_dict(["facility_name", "building_name",
"nurseunit_name", "hospitalservice_code_text"], separator=" -- "))
care_site_rules = [("key_name", "k_care_site"),
(("facility_name", "building_name", "nurseunit_name", "hospitalservice_code_text"),
care_site_name_mapper,
{"mapped_value": "s_care_site_name"})]
source_care_site_csv = os.path.join(output_csv_directory, "source_care_site.csv")
care_site_runner_obj = generate_mapper_obj(care_site_csv, PopulationCareSite(), source_care_site_csv,
SourceCareSiteObject(), care_site_rules,
output_class_obj, in_out_map_obj)
care_site_runner_obj.run()
# Encounters
encounter_file_name = os.path.join(input_csv_directory, file_name_dict["encounter"])
encounter_id_duplicate_mapper = DuplicateExcludeMapper("encounterid")
encounter_rules = [
("encounterid", "s_encounter_id"),
("empiPersonId", "s_person_id"),
("servicedate", "s_visit_start_datetime"),
("dischargedate", "s_visit_end_datetime"),
("type_code_text", "s_visit_type"),
("classification_code_text", "m_visit_type"),
("dischargedisposition_code_text", "s_discharge_to"),
("dischargedisposition_code", "m_discharge_to"),
("admissionsource_code_text", "s_admitting_source"),
("admissionsource_code", "m_admitting_source"),
(("facility_name", "building_name", "nurseunit_name", "hospitalservice_code_text"), key_care_site_mapper, {"mapped_value": "k_care_site"}),
("encounterid", encounter_id_duplicate_mapper, {"i_exclude": "i_exclude"})
]
source_encounter_csv = os.path.join(output_csv_directory, "source_encounter.csv")
# Generate care site combination of tenant and hospitalservice_code_text
encounter_runner_obj = generate_mapper_obj(encounter_file_name, PopulationEncounter(), source_encounter_csv,
SourceEncounterObject(), encounter_rules,
output_class_obj, in_out_map_obj)
encounter_runner_obj.run()
observation_csv_file = os.path.join(input_csv_directory, "population_observation.csv")
generate_observation_period(source_encounter_csv, observation_csv_file,
"s_person_id", "s_visit_start_datetime", "s_visit_end_datetime")
observation_period_rules = [("s_person_id", "s_person_id"),
("s_visit_start_datetime", "s_start_observation_datetime"),
("s_visit_end_datetime", "s_end_observation_datetime")]
source_observation_period_csv = os.path.join(output_csv_directory, "source_observation_period.csv")
observation_runner_obj = generate_mapper_obj(observation_csv_file, PopulationObservationPeriod(),
source_observation_period_csv,
SourceObservationPeriodObject(), observation_period_rules,
output_class_obj, in_out_map_obj)
observation_runner_obj.run()
# Encounter plan or insurance coverage
source_encounter_coverage_csv = os.path.join(output_csv_directory, "source_encounter_coverage.csv")
encounter_coverage_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("servicedate", "s_start_payer_date"),
("dischargedate", "s_end_payer_date"),
("financialclass_code_text", "s_payer_name"),
("financialclass_code_text", "m_payer_name"),
("financialclass_code_text", "s_plan_name"),
("financialclass_code_text", "m_plan_name")]
encounter_benefit_runner_obj = generate_mapper_obj(encounter_file_name,
PopulationEncounter(),
source_encounter_coverage_csv, SourceEncounterCoverageObject(),
encounter_coverage_rules, output_class_obj, in_out_map_obj)
encounter_benefit_runner_obj.run()
population_location_csv = os.path.join(input_csv_directory, "population_encounter_location.csv")
source_encounter_detail_csv = os.path.join(output_csv_directory, "source_encounter_detail.csv")
def check_if_not_empty(input_dict):
if "begindate" in input_dict:
if not len(input_dict["begindate"]):
return {"i_exclude": 1}
else:
return {"i_exclude": ""}
else:
return {"i_exclude": 1}
source_encounter_detail_rules = [
("encounterid", "s_encounter_id"),
("encounterid", "s_encounter_detail_id"),
("empiPersonId", "s_person_id"),
("begindate", "s_start_datetime"),
("enddate", "s_end_datetime"),
#("classification_display", "s_visit_detail_type"),
#("classification_display", "m_visit_detail_type"),
(("facility_name", "building_name", "nurseunit_name", "hospitalservice_code_text"), key_care_site_mapper, {"mapped_value": "k_care_site"}),
("begindate", PassThroughFunctionMapper(check_if_not_empty), {"i_exclude": "i_exclude"})
]
encounter_detail_runner_obj = generate_mapper_obj(population_location_csv, PopulationEncounterLocation(),
source_encounter_detail_csv,
SourceEncounterDetailObject(),
source_encounter_detail_rules, output_class_obj, in_out_map_obj)
encounter_detail_runner_obj.run()
def m_rank_func(input_dict):
if input_dict["billingrank"] == "PRIMARY":
return {"m_rank": "Primary"}
elif input_dict["billingrank"] == "SECONDARY":
return {"m_rank": "Secondary"}
else:
return {}
condition_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("effectiveDate", "s_start_condition_datetime"),
("condition_code", "s_condition_code"),
("condition_code_oid", "m_condition_code_oid"),
("billingrank", PassThroughFunctionMapper(m_rank_func), {"m_rank": "m_rank"}),
("source", "s_condition_type"),
("presentonadmission_code", "s_present_on_admission_indicator")]
condition_csv = os.path.join(input_csv_directory, file_name_dict["condition"])
source_condition_csv = os.path.join(output_csv_directory, "source_condition.csv")
condition_mapper_obj = generate_mapper_obj(condition_csv, PopulationCondition(), source_condition_csv,
SourceConditionObject(),
condition_rules, output_class_obj, in_out_map_obj)
condition_mapper_obj.run()
procedure_csv = os.path.join(input_csv_directory, file_name_dict["procedure"])
source_procedure_csv = os.path.join(output_csv_directory, "source_procedure.csv")
procedure_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("servicestartdate", "s_start_procedure_datetime"),
("serviceenddate", "s_end_procedure_datetime"),
("procedure_code", "s_procedure_code"),
("procedure_code_oid", "s_procedure_code_type"),
("procedure_code_oid", "m_procedure_code_oid")
]
procedure_mapper_obj = generate_mapper_obj(procedure_csv, PopulationProcedure(), source_procedure_csv,
SourceProcedureObject(),
procedure_rules, output_class_obj, in_out_map_obj)
procedure_mapper_obj.run()
def active_medications(input_dict):
if "status_code_text" in input_dict:
if input_dict["status_code_text"] not in ('Complete', 'Discontinued', 'Active', 'Suspended'):
return {"i_exclude": 1}
else:
return {}
else:
return {}
["medicationid", "encounterid", "empiPersonId", "intendeddispenser", "startdate", "stopdate", "doseunit_code",
"doseunit_code_oid", "doseunit_code_text", "category_id", "category_code_oid", "category_code_text",
"frequency_id", "frequency_code_oid", "frequency_code_text", "status_code", "status_code_oid",
"status_code_text", "route_code", "route_code_oid", "route_code_text", "drug_code", "drug_code_oid",
"drug_code_text", "dosequantity", "source"]
medication_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("drug_code", "s_drug_code"),
("drug_code_oid", "m_drug_code_oid"),
("drug_code_text", "s_drug_text"),
("startdate", "s_start_medication_datetime"),
("stopdate", "s_end_medication_datetime"),
("route_code_text", "s_route"),
("route_code", "m_route"),
("dosequantity", "s_quantity"),
("doseunit_code_text", "s_dose_unit"),
("doseunit_code", "m_dose_unit"),
("intendeddispenser", "s_drug_type"),
("intendeddispenser", "m_drug_type"),
("status_code", "s_status"),
("status_code_text", PassThroughFunctionMapper(active_medications),
{"i_exclude": "i_exclude"})
]
medication_csv = os.path.join(input_csv_directory, file_name_dict["medication"])
source_medication_csv = os.path.join(output_csv_directory, "source_medication.csv")
medication_mapper_obj = generate_mapper_obj(medication_csv, PopulationMedication(), source_medication_csv,
SourceMedicationObject(), medication_rules,
output_class_obj, in_out_map_obj)
medication_mapper_obj.run()
result_csv = os.path.join(input_csv_directory, file_name_dict["result"])
source_result_csv = os.path.join(output_csv_directory, "source_result.csv")
["resultid", "encounterid", "empiPersonId", "result_code", "result_code_oid", "result_code_text",
"result_type", "servicedate", "value_text", "value_numeric", "value_numeric_modifier", "unit_code",
"unit_code_oid", "unit_code_text", "value_codified_code", "value_codified_code_oid",
"value_codified_code_text", "date", "interpretation_code", "interpretation_code_oid",
"interpretation_code_text", "specimen_type_code", "specimen_type_code_oid", "specimen_type_code_text",
"bodysite_code", "bodysite_code_oid", "bodysite_code_text", "specimen_collection_date",
"specimen_received_date", "measurementmethod_code", "measurementmethod_code_oid",
"measurementmethod_code_text", "recordertype", "issueddate", "year"]
def remove_equals(input):
return "".join(input["value_text"].split("="))
result_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("servicedate", "s_obtained_datetime"),
("result_code_text", "s_name"),
("result_code", "s_code"),
("result_code_oid", "m_type_code_oid"),
("value_text", "s_result_text"),
(("value_codified_code_text", "interpretation_code_text"),
FilterHasKeyValueMapper(["value_codified_code_text", "interpretation_code_text"]),
{"value_codified_code_text": "m_result_text", "interpretation_code_text": "m_result_text"}),
(("value_numeric", "value_text"), CascadeMapper(FilterHasKeyValueMapper(["value_numeric"]),
ChainMapper(FunctionMapper(remove_equals, "value_text"), IntFloatMapper(), KeyTranslator({"value_text": "value_numeric"}))),
{"value_numeric": "s_result_numeric"}),
("date", "s_result_datetime"),
("value_codified_code", "s_result_code"),
("value_codified_code_oid", "m_result_code_oid"),
("unit_code", "s_result_unit"),
("unit_code", "s_result_unit_code"),
("unit_code_oid", "m_result_unit_code_oid"),
#("norm_unit_of_measure_code", "s_result_unit_code")
("lower_limit", "s_result_numeric_lower"),
("upper_limit", "s_result_numeric_upper")
]
result_mapper_obj = generate_mapper_obj(result_csv, PopulationResult(), source_result_csv, SourceResultObject(),
result_rules, output_class_obj, in_out_map_obj)
result_mapper_obj.run()
if __name__ == "__main__":
arg_parse_obj = argparse.ArgumentParser(description="Mapping Realworld CSV files to Prepared source format for OHDSI mapping")
arg_parse_obj.add_argument("-c", "--config-file-name", dest="config_file_name", help="JSON config file",
default="sbm_config.json")
arg_obj = arg_parse_obj.parse_args()
print("Reading config file '%s'" % arg_obj.config_file_name)
with open(arg_obj.config_file_name, "r") as f:
config_dict = json.load(f)
file_name_dict = {
"demographic": "population_demographics.consolidated.csv",
"encounter": "population_encounter.csv",
"condition": "population_condition.csv",
"measurement": "population_measurement.csv",
"medication": "population_medication.csv",
"procedure": "population_procedure.csv",
"result": "population_results.csv"
}
main(config_dict["csv_input_directory"], config_dict["csv_input_directory"], file_name_dict)
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate CBIS-DDSM like files, smaller and with fake data.
"""
import csv
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core.utils import py_utils
import tensorflow_datasets.public_api as tfds
flags.DEFINE_string('tfds_dir', py_utils.tfds_dir(),
'Path to tensorflow_datasets directory')
FLAGS = flags.FLAGS
MAMMOGRAPHY_HEIGHT = 100 # Note: Much smaller than original images.
MAMMOGRAPHY_WIDTH = 80
ABNORMALITY_SIZE_MIN = 10
ABNORMALITY_SIZE_MAX = 20
NUM_ABNORMALITIES_MAX = 5
BREAST_INTENSITY_MIN = 20
BREAST_INTENSITY_MAX = 200
def remake_dirs(path):
if not tf.io.gfile.exists(path):
tf.io.gfile.makedirs(path)
def write_png(filepath, img):
cv2 = tfds.core.lazy_imports.cv2
with tf.io.gfile.GFile(filepath, 'wb') as f:
_, buf = cv2.imencode('.png', img, (cv2.IMWRITE_PNG_COMPRESSION, 9))
f.write(buf.tobytes())
def _yield_mammography_with_abnormalities():
"""Generate a fake mammography image containing a set of abnormalities."""
mammography = np.zeros((MAMMOGRAPHY_HEIGHT, MAMMOGRAPHY_WIDTH),
dtype=np.uint8)
# Draw a rectangle representing the breast region.
breast_h = np.random.randint(
int(MAMMOGRAPHY_HEIGHT * 0.7),
int(MAMMOGRAPHY_HEIGHT * 0.9) + 1)
breast_w = np.random.randint(
int(MAMMOGRAPHY_WIDTH * 0.7),
int(MAMMOGRAPHY_WIDTH * 0.9) + 1)
breast_y = np.random.randint(0, MAMMOGRAPHY_HEIGHT - breast_h)
breast_x = np.random.randint(0, MAMMOGRAPHY_WIDTH - breast_w)
breast_intensity = np.random.randint(BREAST_INTENSITY_MIN,
BREAST_INTENSITY_MAX + 1)
mammography[breast_y:(breast_y + breast_h),
breast_x:(breast_x + breast_w)] = breast_intensity
abnormalities = [] # Note: pairs of (mask, crop).
for _ in range(np.random.randint(1, NUM_ABNORMALITIES_MAX + 1)):
abnorm_h = np.random.randint(ABNORMALITY_SIZE_MIN, ABNORMALITY_SIZE_MAX + 1)
abnorm_w = np.random.randint(ABNORMALITY_SIZE_MIN, ABNORMALITY_SIZE_MAX + 1)
abnorm_y = np.random.randint(0, breast_h - abnorm_h) + breast_y
abnorm_x = np.random.randint(0, breast_w - abnorm_w) + breast_x
abnorm_intensity = np.random.randint(int(BREAST_INTENSITY_MIN * 1.2), 256)
while np.absolute(abnorm_intensity - breast_intensity) < 10:
abnorm_intensity = np.random.randint(int(BREAST_INTENSITY_MIN * 1.2), 256)
# Draw abnormality in the mammography.
mammography[abnorm_y:(abnorm_y + abnorm_h),
abnorm_x:(abnorm_x + abnorm_w)] = abnorm_intensity
# Abnormality mask w.r.t the full mammography.
abnorm_mask = np.zeros_like(mammography)
abnorm_mask[abnorm_y:(abnorm_y + abnorm_h),
abnorm_x:(abnorm_x + abnorm_w)] = 255
# Abnormality crop.
abnorm_crop = np.ones((abnorm_h, abnorm_w)) * abnorm_intensity
abnormalities.append((abnorm_mask, abnorm_crop))
return mammography, abnormalities
def _yield_csv_rows_base(output_dir, row_extra_info_gen_fn):
"""Yield rows for the CSV ground-truth files, also creates fake images."""
mammography, abnormalities = _yield_mammography_with_abnormalities()
patient_id = 'P_%05d' % np.random.randint(0, 1000)
breast_density = np.random.randint(1, 5)
left_or_right_breast = np.random.choice(['LEFT', 'RIGHT'])
image_view = np.random.choice(['CC', 'MLO'])
study_id = tuple(np.random.randint(0, 999999999 + 1, size=2))
study_id = '1.3.6.1.4.1.9590.100.1.2.%010d.%010d' % study_id
series_id = tuple(np.random.randint(0, 999999999 + 1, size=2))
series_id = '1.3.6.1.4.1.9590.100.1.2.%010d.%010d' % series_id
remake_dirs(os.path.join(output_dir, '%s/%s' % (study_id, series_id)))
# Write mammography image.
mammography_basename = '%s/%s/000000' % (study_id, series_id)
write_png(
os.path.join(output_dir, mammography_basename + '.png'), mammography)
for abnormality_id, abnormality in enumerate(abnormalities, 1):
# Write abnormality crop image.
crop_basename = '%s/%s/%06d' % (study_id, series_id, abnormality_id * 2 - 1)
write_png(os.path.join(output_dir, crop_basename + '.png'), abnormality[1])
# Write abnormality mask image.
mask_basename = '%s/%s/%06d' % (study_id, series_id, abnormality_id * 2)
write_png(os.path.join(output_dir, mask_basename + '.png'), abnormality[0])
row = {
'patient_id':
patient_id,
'breast density':
breast_density,
'left or right breast':
left_or_right_breast,
'image view':
image_view,
'abnormality id':
abnormality_id,
'abnormality type':
'calcification',
'assessment':
np.random.randint(1, 5),
'pathology':
np.random.choice(['BENIGN', 'BENIGN_WITHOUT_CALLBACK',
'MALIGNANT']),
'subtlety':
np.random.randint(1, 5),
'image file path':
mammography_basename + '.dcm',
'cropped image file path':
crop_basename + '.dcm',
'ROI mask file path':
mask_basename + '.dcm',
}
row.update(row_extra_info_gen_fn())
yield row
def _yield_csv_rows_calc(output_dir, calc_types, calc_distributions):
"""Generate a row for the calcification abnormalities."""
def _row_extra_info_gen_fn():
return {
'calc type': np.random.choice(calc_types),
'calc distribution': np.random.choice(calc_distributions),
}
return _yield_csv_rows_base(output_dir, _row_extra_info_gen_fn)
def _yield_csv_rows_mass(output_dir, mass_shapes, mass_margins):
"""Generate a row for the mass abnormalities."""
def _row_extra_info_gen_fn():
return {
'mass shape': np.random.choice(mass_shapes),
'mass margins': np.random.choice(mass_margins),
}
return _yield_csv_rows_base(output_dir, _row_extra_info_gen_fn)
def _generate_csv(csv_filepath, row_yielder, number_of_mammograms):
"""Generate a csv file with `number_of_examples` mammograms."""
with tf.io.gfile.GFile(os.path.join(csv_filepath), 'w') as f:
writer = None
for _ in range(number_of_mammograms):
for row in row_yielder():
if writer is None:
writer = csv.DictWriter(f, row.keys())
writer.writeheader()
writer.writerow(row)
def _generate_data_calc(output_dir, number_of_mammograms):
"""Generate train/test CSV and images of calcification abnormalities."""
calc_types = tfds.features.ClassLabel(
names_file=tfds.core.tfds_path(
os.path.join('image', 'cbis_ddsm_calc_types.txt'))).names
calc_distributions = tfds.features.ClassLabel(
names_file=tfds.core.tfds_path(
os.path.join('image', 'cbis_ddsm_calc_distributions.txt'))).names
_generate_csv(
os.path.join(output_dir, 'calc_case_description_train_set.csv'),
lambda: _yield_csv_rows_calc(output_dir, calc_types, calc_distributions),
number_of_mammograms[0])
_generate_csv(
os.path.join(output_dir, 'calc_case_description_test_set.csv'),
lambda: _yield_csv_rows_calc(output_dir, calc_types, calc_distributions),
number_of_mammograms[1])
def _generate_data_mass(output_dir, number_of_mammograms):
"""Generate train/test CSV and images of mass abnormalities."""
mass_shapes = tfds.features.ClassLabel(
names_file=tfds.core.tfds_path(
os.path.join('image', 'cbis_ddsm_mass_shapes.txt'))).names
mass_margins = tfds.features.ClassLabel(
names_file=tfds.core.tfds_path(
os.path.join('image', 'cbis_ddsm_mass_margins.txt'))).names
_generate_csv(
os.path.join(output_dir, 'mass_case_description_train_set.csv'),
lambda: _yield_csv_rows_mass(output_dir, mass_shapes, mass_margins),
number_of_mammograms[0])
_generate_csv(
os.path.join(output_dir, 'mass_case_description_test_set.csv'),
lambda: _yield_csv_rows_mass(output_dir, mass_shapes, mass_margins),
number_of_mammograms[1])
def main(_):
output_dir = os.path.join(FLAGS.tfds_dir, 'testing', 'test_data',
'fake_examples', 'curated_breast_imaging_ddsm')
np.random.seed(0x12345)
_generate_data_calc(output_dir, (3, 2))
_generate_data_mass(output_dir, (3, 2))
if __name__ == '__main__':
app.run(main)
|
|
XXXXXXXXX XXXXX
XXXXXX
XXXXXX
XXXXX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX XXXXXXX X XXXXX XXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXX X
X XXXXXXX XXXXXX XX
X XXXXXXX XXXXXXXXXX XX
X XXXXXXX XXXXXXXX XX
X XXXXXXX XXXXX XX
X XXXXXXX XXXXXXXXXXXX XX
X XXXXXXX XXXXXXXX XX
X XXXXXXX XXXXXX X
X
X XX
X XX
XXXXXXXXX
XXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXXXXXXXXX XXXXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXX
XXXXXX XXX XXXX XXXXXXXXXX XXXX XXXXXXX XX XXXX XX XX XXXXXXX XX XXXXXXXXX XXXXXXX XXXXXXX XXX XXX XX XXXXX XXXXX XX XXX XXXXXXXX XXX XXXXXXXXXXX XXX XXXXXXXXXX
XXXXXX XXXXXXXXXXX XXX XXX XXXXXXXX XXX XXX XXX XXXXXX XXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX X XXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX XXX XXXXX X XXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXX XXXXX X XXXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX XXX XXXXX X XXXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX X XXXX XXXXXXXX
XXXXX
XXXXXX XXX XXXXX XXX XXXXXXXXXXXXXXXXXX XXXXXX XXXX XXX XXX XX XXX XXXX XX XXXX XXXXXX XXX XXXXXX XX XXXXXX XXXXXXXX XXXXXXXXXXXX XX XXX XXX XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX XX XXXXXXXXXXXXXXXXX XX X XXXXXX XX XXX XXXXX XXXX XX XXXXXXX XXXXX XXXXXXXXXXX XXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXX X XXXXXX XX
XXXXXX XX XXXXXXX XX XXXXXXX XXX XXXXXX XXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXX XX XXXXX X XXXXXXXXXX XXXXX XX XXX XXX XXX XXXXXXX XXXXXX XX XX
XXXXXX XXX XX XXXXX XX XXX XXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXXX XXXXX XXX XXXXX XXXXXXX XXXXX XXXXXXXXXX
XXX XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXX X XXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXX X XXXXX XXXXXXX XX X XXXXXXX XXX XXXX XX XXX XXXXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX X XXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX X XXXX XXX XXX XXXXXXXXXXXXXXXXXXX XX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXX
XXXXXXXXXXX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXX
XXXXX
XXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXX XXXXXXXXXX XXXXXXXXXX XXX XXXXXXX XXXXXXX XX XXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXX XXXXX XXX XXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXX XX X XXXXXXX XXXX X XXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXX XXXXXXX X XXXXXXX XXXXX XXX
XXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXX X XXXXX XX XXXXXXXXX XX XXXXXXX XXX XXXXXXXXXXXXXXXXX XXXXXX XX XXXXXX XXXXXXX XXX XXX XXXXXXXXXXX XXXXXXXXXXX XX XXXXX
XXXXXXXXXXXX
XXXXXXX XXX XX XXXXXX XX XXX XXXX XX XXXXXX XXX XXXXXX XX XXX XXXX XXXXX XX XXXXXXXXX XXXXXXXX XXX XXXXXXX XXXXX X XXXXXXXXXXX XX XXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXX XX XXX XXXXXXXXX XXXXXXX XXXXX XXXXX XXXXXXXXXXX XX XX XXXXXXXXX XXXXXXXX XXX XXXX XXXXXX XXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX
XXXX
XXX XXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXXXX
XXXXXXX
XXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXX
XXXXXXXX
XXXXXXXX
XXX XXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXX
XXXXX
XXXX XXXXXXXXXXXXX
XXXX XXXXXXXXXXX
XXXXXX XXXXXXXXXX XXXXX XXXXX XX XXXX XX XXXXXXXXXX XXX XXXXX XXXXX XX XXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXX X
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XX
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX XX
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX XX
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XX
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XX
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX XX
X XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX X
X
X XX
X XXXXXXXXX
XXXXX XXXXXXXX XX XXX XXXXX XXXXX XXX XXXXXXXXX XXXXXXXXXX XXXXXXX XXXXX XXX XXXXXX XXX XXX XX XXXX XXXXXXXXXXXX
XXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXXX
XXXXXX XXXX XXXXX XXXXX XX XXX XXX XXXX XXXXX XXXXXXXX XXXXXX XX XXX XXXX XXXXXXXX XX XXXXXXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXXX
XXXXXXX XXXXXXX XXXX X XXXXXX XXX XX XXXXXXXXXX XXX XXXXXX XXXX XX XXXXXX XXXX XXX XXXXXXX XXXXX XXXXXXXX XX XXXXX XX XXXXXXXXX XXXXXXX XXX XXXXXX XXX
XXXXXXXXXX XXX XXXX XX XXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XXXXXX
XXXXXX XXXXXXXXX XXX XXXXXXX XXXXX XXX XXXXXX XXX XXX XX XXXX XXXXXXX XX XXXXXXX XXX XXXXXXX XX XXX XXXXXXXXXX
XXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXX
XXXXXXX XXXXX XXXXX XXXX XX XXXXX XXX XXXXXX XXXX XXXX XXX XXXX XXXXXX XX XXXXX XXXXXX XXXX XXXX XXXX XXXXXX XXXXXXXXXXXXX XX XXX XXXXXXXXXX XXXX XX
XXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXXXX XXXXXX XXXX XX XXXXXXX XXX XXXXXXXXXXX XXXXXXXXXX XXX XXXX XXXXX XX XXXXX XXXXXX XXXXXX XXXX XXXX XXXX XX XXXX XX XXXXXXX XXXXXX XXXXX XXXX XXXXXXXXXXX
XXXXXXXXXX XXXXXXX XXX XX XXXXXXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXXXXXXXX XX XXX XXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXX
XXXXXX
XXXXXXXXXX
XXXXXX
XXXXXXXXX
XXXX XXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXX XXXXXXXXXX XXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXX X XXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXX XXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX X XXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXX XXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXX XXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXX
XXXXXXXXX XXXXX XX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXX XXX XXXX XXXXXXXXXXX XXXXX XXX XXX XXXXXXXXXX XXX XXXXXXXXXXXX
XXXXXXXXXXXXX XXXXX XXX X XXXX XXXXX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXX XXX XXXXXXXXXXXX XX XXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXX XXXXXXX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXX XXXXXXXXXXXXX
XXXXXXXXXX XX XXXXXXXX XXXXX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXX
XXXXXX
XXXXXX
XXXXXXXXXX
XXXXXXX
XXXXXXX
|
|
"""
test_utils.py
adds WebCase for testing Howl server functionality
"""
import sys, time,socket
import unittest
import commands
import pprint
import simplejson
import httplib2
import howl
class WebCase(unittest.TestCase):
"""
Modification of the CherryPy WebCase tests.
Defines the self.getPage method to request a page from Howl
"""
def __init__(self,*args):
unittest.TestCase.__init__(self,*args)
self.HOST = 'localhost'
self.PORT = howl.config.port
self.username = "test_user"
self.password = "test"
self.request_headers = {'Content-Type':'application/json',
'Accept':'application/json'}
self.client = httplib2.Http()
self.client.add_credentials(self.username,self.password)
self.client.follow_redirects = True
def getPage(self, uri, headers={}, method="GET", body=None, convert_body=True):
"""Open the uri with debugging support. Return status, headers, body."""
if not uri.startswith("http"):
self.uri = "http://%s:%s/%s"% (self.HOST,self.PORT,uri)
else:
self.uri = uri
headers.update(self.request_headers)
if body and not type(body) == str:
body = simplejson.dumps(body)
# Trying 10 times is simply in case of socket errors.
# Normal case--it should run once.
result = None
for trial in xrange(10):
try:
result = self.client.request(self.uri,method,body=body,headers=headers)
break
except socket.error:
time.sleep(0.5)
try:
self.headers, self.body = result
except TypeError:
self._handlewebError('Unable to reach server')
self.status = self.headers['status']
if self.body and convert_body:
try:
self.body = simplejson.loads(self.body)
except:
self._handlewebError(self._exc_info()[1])
interactive = True
console_height = 30
def _handlewebError(self, msg):
print
print "ERROR:", msg
if not self.interactive:
raise self.failureException(msg)
p = "Show: [B]ody [H]eaders [S]tatus [U]RI; [I]gnore, [R]aise, or sys.e[X]it >> "
print p,
while True:
i = getchar().upper()
if i not in "BHSUIRX":
continue
print i.upper() # Also prints new line
if i == "B":
pretty_print_error(self.body)
elif i == "H":
pprint.pprint(self.headers)
elif i == "S":
print self.status
elif i == "U":
print self.uri
elif i == "I":
# return without raising the normal exception
return
elif i == "R":
raise self.failureException(msg)
elif i == "X":
self.exit()
print p,
def exit(self):
sys.exit()
def __call__(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(self, self._exc_info())
return
ok = 0
try:
testMethod()
ok = 1
except self.failureException:
result.addFailure(self, self._exc_info())
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(self, self._exc_info())
try:
self.tearDown()
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(self, self._exc_info())
ok = 0
if ok:
result.addSuccess(self)
finally:
result.stopTest(self)
def assertStatus(self, status, msg=None):
"""Fail if self.status != status."""
if isinstance(status, basestring):
if not self.status == status:
if msg is None:
msg = 'Status (%s) != %s' % (`self.status`, `status`)
self._handlewebError(msg)
elif isinstance(status, int):
code = int(self.status[:3])
if code != status:
if msg is None:
msg = 'Status (%s) != %s' % (`self.status`, `status`)
self._handlewebError(msg)
def assertHeader(self, key, value=None, msg=None):
"""Fail if (key, [value]) not in self.headers."""
lowkey = key.lower()
for k, v in self.headers.iteritems():
if k.lower() == lowkey:
if value is None or str(value) == v:
return v
if msg is None:
if value is None:
msg = '%s not in headers' % `key`
else:
msg = '%s:%s not in headers' % (`key`, `value`)
self._handlewebError(msg)
def assertNoHeader(self, key, msg=None):
"""Fail if key in self.headers."""
lowkey = key.lower()
matches = [k for k, v in self.headers.iteritems() if k.lower() == lowkey]
if matches:
if msg is None:
msg = '%s in headers' % `key`
self._handlewebError(msg)
def assertValidBody(self, json_type, msg=None):
"""Fail if self.body is not valid json"""
try:
simplejson.dumps(self.body)
except:
if msg is None:
msg = format_exc()
self._handlewebError(msg)
try:
self.assert_(self.body.keys()[0] == json_type)
except:
if msg is None:
msg = "Body is not of type '%s'" % json_type
self._handlewebError(msg)
def assertResponse(self,status,body_type):
self.assertStatus(status)
self.assertValidBody(body_type)
def format_exc(exc=None):
"""Return exc (or sys.exc_info if None), formatted."""
if exc is None:
exc = sys.exc_info()
if exc == (None, None, None):
return ""
import traceback
return "".join(traceback.format_exception(*exc))
def pretty_print_error(error):
if type(error) == dict:
print "{'error':"
print "{'status': 500,"
print "'message':"
print error['error']['message']
if error.has_key('traceback'):
print "'traceback':"
print error['error']['traceback']
print "}"
else:
pprint.pprint(error)
try:
# On Windows, msvcrt.getch reads a single char without output.
import msvcrt
def getchar():
return msvcrt.getch()
except ImportError:
# Unix getchr
import tty, termios
def getchar():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def print_timing(func):
def wrapper(*arg):
t1 = time.time()
res = func(*arg)
t2 = time.time()
print '%s took %0.3f ms' % (func.func_name, (t2-t1)*1000.0)
return res
return wrapper
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
from dateutil.parser import parse
from dateutil.tz import tzlocal
import botocore.config
import botocore.compat
from botocore.compat import total_seconds
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.exceptions import InvalidConfigError
from botocore.exceptions import RefreshWithMFAUnsupportedError
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
logger = logging.getLogger(__name__)
def create_credential_resolver(session):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
credential_file = session.get_config_variable('credentials_file')
config_file = session.get_config_variable('config_file')
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
env_provider = EnvProvider()
providers = [
env_provider,
AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=session.create_client,
cache={},
profile_name=profile_name,
),
SharedCredentialProvider(
creds_filename=credential_file,
profile_name=profile_name
),
# The new config file has precedence over the legacy
# config file.
ConfigProvider(config_filename=config_file, profile_name=profile_name),
OriginalEC2Provider(),
BotoProvider(),
InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts)
)
]
explicit_profile = session.get_config_variable('profile',
methods=('instance',))
if explicit_profile is not None:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
# The one edge case if is all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
else:
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
def _local_now():
return datetime.datetime.now(tzlocal())
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value):
if isinstance(value, datetime.datetime):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
return value
def create_assume_role_refresher(client, params):
def refresh():
response = client.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
def create_mfa_serial_refresher():
def _refresher():
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
return _refresher
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
# We explicitly convert them into unicode to avoid such error.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = botocore.compat.ensure_unicode(self.access_key)
self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar refresh_timeout: How long a given set of credentials are valid for.
Useful for credentials fetched over the network.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
:ivar session: The ``Session`` the credentials were created for. Useful for
subclasses.
"""
refresh_timeout = 15 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self.method = method
self._normalize()
def _normalize(self):
self._access_key = botocore.compat.ensure_unicode(self._access_key)
self._secret_key = botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self):
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
# The credentials should be refreshed if they're going to expire
# in less than 5 minutes.
if self._seconds_remaining() >= self.refresh_timeout:
# There's enough time left. Don't refresh.
return False
# Assume the worst & refresh.
logger.debug("Credentials need to be refreshed.")
return True
def _refresh(self):
if not self.refresh_needed():
return
metadata = self._refresh_using()
self._set_from_data(metadata)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
def _set_from_data(self, data):
self.access_key = data['access_key']
self.secret_key = data['secret_key']
self.token = data['token']
self._expiry_time = parse(data['expiry_time'])
logger.debug("Retrieved credentials will expire at: %s", self._expiry_time)
self._normalize()
class CredentialProvider(object):
# Implementations must provide a method.
METHOD = None
def __init__(self, session=None):
self.session = session
def load(self):
"""
Loads the credentials from their source & sets them on the object.
Subclasses should implement this method (by reading from disk, the
environment, the network or wherever), returning ``True`` if they were
found & loaded.
If not found, this method should return ``False``, indictating that the
``CredentialResolver`` should fall back to the next available method.
The default implementation does nothing, assuming the user has set the
``access_key/secret_key/token`` themselves.
:returns: Whether credentials were found & set
:rtype: boolean
"""
return True
def _extract_creds_from_mapping(self, mapping, *key_names):
found = []
for key_name in key_names:
try:
found.append(mapping[key_name])
except KeyError:
raise PartialCredentialsError(provider=self.METHOD,
cred_var=key_name)
return found
class InstanceMetadataProvider(CredentialProvider):
METHOD = 'iam-role'
def __init__(self, iam_role_fetcher):
self._role_fetcher = iam_role_fetcher
def load(self):
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.info('Found credentials from IAM Role: %s', metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
creds = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class EnvProvider(CredentialProvider):
METHOD = 'env'
ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
# The token can come from either of these env var.
# AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
def __init__(self, environ=None, mapping=None):
"""
:param environ: The environment variables (defaults to
``os.environ`` if no value is provided).
:param mapping: An optional mapping of variable names to
environment variable names. Use this if you want to
change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
The dict can have up to 3 keys: ``access_key``, ``secret_key``,
``session_token``.
"""
if environ is None:
environ = os.environ
self.environ = environ
self._mapping = self._build_mapping(mapping)
def _build_mapping(self, mapping):
# Mapping of variable name to env var name.
var_mapping = {}
if mapping is None:
# Use the class var default.
var_mapping['access_key'] = self.ACCESS_KEY
var_mapping['secret_key'] = self.SECRET_KEY
var_mapping['token'] = self.TOKENS
else:
var_mapping['access_key'] = mapping.get(
'access_key', self.ACCESS_KEY)
var_mapping['secret_key'] = mapping.get(
'secret_key', self.SECRET_KEY)
var_mapping['token'] = mapping.get(
'token', self.TOKENS)
if not isinstance(var_mapping['token'], list):
var_mapping['token'] = [var_mapping['token']]
return var_mapping
def load(self):
"""
Search for credentials in explicit environment variables.
"""
if self._mapping['access_key'] in self.environ:
logger.info('Found credentials in environment variables.')
access_key, secret_key = self._extract_creds_from_mapping(
self.environ, self._mapping['access_key'],
self._mapping['secret_key'])
token = self._get_session_token()
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self):
for token_envvar in self._mapping['token']:
if token_envvar in self.environ:
return self.environ[token_envvar]
class OriginalEC2Provider(CredentialProvider):
METHOD = 'ec2-credentials-file'
CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
ACCESS_KEY = 'AWSAccessKeyId'
SECRET_KEY = 'AWSSecretKey'
def __init__(self, environ=None, parser=None):
if environ is None:
environ = os.environ
if parser is None:
parser = parse_key_val_file
self._environ = environ
self._parser = parser
def load(self):
"""
Search for a credential file used by original EC2 CLI tools.
"""
if 'AWS_CREDENTIAL_FILE' in self._environ:
full_path = os.path.expanduser(self._environ['AWS_CREDENTIAL_FILE'])
creds = self._parser(full_path)
if self.ACCESS_KEY in creds:
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
access_key = creds[self.ACCESS_KEY]
secret_key = creds[self.SECRET_KEY]
# EC2 creds file doesn't support session tokens.
return Credentials(access_key, secret_key, method=self.METHOD)
else:
return None
class SharedCredentialProvider(CredentialProvider):
METHOD = 'shared-credentials-file'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, creds_filename, profile_name=None, ini_parser=None):
self._creds_filename = creds_filename
if profile_name is None:
profile_name = 'default'
self._profile_name = profile_name
if ini_parser is None:
ini_parser = botocore.config.raw_config_parse
self._ini_parser = ini_parser
def load(self):
try:
available_creds = self._ini_parser(self._creds_filename)
except ConfigNotFound:
return None
if self._profile_name in available_creds:
config = available_creds[self._profile_name]
if self.ACCESS_KEY in config:
logger.info("Found credentials in shared credentials file: %s",
self._creds_filename)
access_key, secret_key = self._extract_creds_from_mapping(
config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
def _get_session_token(self, config):
for token_envvar in self.TOKENS:
if token_envvar in config:
return config[token_envvar]
class ConfigProvider(CredentialProvider):
"""INI based config provider with profile sections."""
METHOD = 'config-file'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, config_filename, profile_name, config_parser=None):
"""
:param config_filename: The session configuration scoped to the current
profile. This is available via ``session.config``.
:param profile_name: The name of the current profile.
:param config_parser: A config parser callable.
"""
self._config_filename = config_filename
self._profile_name = profile_name
if config_parser is None:
config_parser = botocore.config.load_config
self._config_parser = config_parser
def load(self):
"""
If there is are credentials in the configuration associated with
the session, use those.
"""
try:
full_config = self._config_parser(self._config_filename)
except ConfigNotFound:
return None
if self._profile_name in full_config['profiles']:
profile_config = full_config['profiles'][self._profile_name]
if self.ACCESS_KEY in profile_config:
logger.info("Credentials found in config file: %s",
self._config_filename)
access_key, secret_key = self._extract_creds_from_mapping(
profile_config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(profile_config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, profile_config):
for token_name in self.TOKENS:
if token_name in profile_config:
return profile_config[token_name]
class BotoProvider(CredentialProvider):
METHOD = 'boto-config'
BOTO_CONFIG_ENV = 'BOTO_CONFIG'
DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
def __init__(self, environ=None, ini_parser=None):
if environ is None:
environ = os.environ
if ini_parser is None:
ini_parser = botocore.config.raw_config_parse
self._environ = environ
self._ini_parser = ini_parser
def load(self):
"""
Look for credentials in boto config file.
"""
if self.BOTO_CONFIG_ENV in self._environ:
potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
else:
potential_locations = self.DEFAULT_CONFIG_FILENAMES
for filename in potential_locations:
try:
config = self._ini_parser(filename)
except ConfigNotFound:
# Move on to the next potential config file name.
continue
if 'Credentials' in config:
credentials = config['Credentials']
if self.ACCESS_KEY in credentials:
logger.info("Found credentials in boto config file: %s",
filename)
access_key, secret_key = self._extract_creds_from_mapping(
credentials, self.ACCESS_KEY, self.SECRET_KEY)
return Credentials(access_key, secret_key,
method=self.METHOD)
class AssumeRoleProvider(CredentialProvider):
METHOD = 'assume-role'
ROLE_CONFIG_VAR = 'role_arn'
# Credentials are considered expired (and will be refreshed) once the total
# remaining time left until the credentials expires is less than the
# EXPIRY_WINDOW.
EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, load_config, client_creator, cache, profile_name,
prompter=getpass.getpass):
"""
:type load_config: callable
:param load_config: A function that accepts no arguments, and
when called, will return the full configuration dictionary
for the session (``session.full_config``).
:type client_creator: callable
:param client_creator: A factory function that will create
a client when called. Has the same interface as
``botocore.session.Session.create_client``.
:type cache: JSONFileCache
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example
of this is the ``JSONFileCache`` class.
:type profile_name: str
:param profile_name: The name of the profile.
:type prompter: callable
:param prompter: A callable that returns input provided
by the user (i.e raw_input, getpass.getpass, etc.).
"""
#: The cache used to first check for assumed credentials.
#: This is checked before making the AssumeRole API
#: calls and can be useful if you have short lived
#: scripts and you'd like to avoid calling AssumeRole
#: until the credentials are expired.
self.cache = cache
self._load_config = load_config
# client_creator is a callable that creates function.
# It's basically session.create_client
self._client_creator = client_creator
self._profile_name = profile_name
self._prompter = prompter
# The _loaded_config attribute will be populated from the
# load_config() function once the configuration is actually
# loaded. The reason we go through all this instead of just
# requiring that the loaded_config be passed to us is to that
# we can defer configuration loaded until we actually try
# to load credentials (as opposed to when the object is
# instantiated).
self._loaded_config = {}
def load(self):
self._loaded_config = self._load_config()
if self._has_assume_role_config_vars():
return self._load_creds_via_assume_role()
def _has_assume_role_config_vars(self):
profiles = self._loaded_config.get('profiles', {})
return self.ROLE_CONFIG_VAR in profiles.get(self._profile_name, {})
def _load_creds_via_assume_role(self):
# We can get creds in one of two ways:
# * It can either be cached on disk from an pre-existing session
# * Cache doesn't have the creds (or is expired) so we need to make
# an assume role call to get temporary creds, which we then cache
# for subsequent requests.
creds = self._load_creds_from_cache()
if creds is not None:
logger.debug("Credentials for role retrieved from cache.")
return creds
else:
# We get the Credential used by botocore as well
# as the original parsed response from the server.
creds, response = self._retrieve_temp_credentials()
cache_key = self._create_cache_key()
self._write_cached_credentials(response, cache_key)
return creds
def _load_creds_from_cache(self):
cache_key = self._create_cache_key()
try:
from_cache = self.cache[cache_key]
if self._is_expired(from_cache):
# Don't need to delete the cache entry,
# when we refresh via AssumeRole, we'll
# update the cache with the new entry.
logger.debug("Credentials were found in cache, but they are expired.")
return None
else:
return self._create_creds_from_response(from_cache)
except KeyError:
return None
def _is_expired(self, credentials):
end_time = parse(credentials['Credentials']['Expiration'])
now = datetime.datetime.now(tzlocal())
seconds = total_seconds(end_time - now)
return seconds < self.EXPIRY_WINDOW_SECONDS
def _create_cache_key(self):
role_config = self._get_role_config_values()
# On windows, ':' is not allowed in filenames, so we'll
# replace them with '_' instead.
role_arn = role_config['role_arn'].replace(':', '_')
role_session_name=role_config.get('role_session_name')
if role_session_name:
cache_key = '%s--%s--%s' % (self._profile_name, role_arn, role_session_name)
else:
cache_key = '%s--%s' % (self._profile_name, role_arn)
return cache_key.replace('/', '-')
def _write_cached_credentials(self, creds, cache_key):
self.cache[cache_key] = creds
def _get_role_config_values(self):
# This returns the role related configuration.
profiles = self._loaded_config.get('profiles', {})
try:
source_profile = profiles[self._profile_name]['source_profile']
role_arn = profiles[self._profile_name]['role_arn']
mfa_serial = profiles[self._profile_name].get('mfa_serial')
except KeyError as e:
raise PartialCredentialsError(provider=self.METHOD,
cred_var=str(e))
external_id = profiles[self._profile_name].get('external_id')
role_session_name = profiles[self._profile_name].get('role_session_name')
if source_profile not in profiles:
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" referenced in '
'the profile "%s" does not exist.' % (
source_profile, self._profile_name)))
source_cred_values = profiles[source_profile]
return {
'role_arn': role_arn,
'external_id': external_id,
'source_profile': source_profile,
'mfa_serial': mfa_serial,
'source_cred_values': source_cred_values,
'role_session_name': role_session_name
}
def _create_creds_from_response(self, response):
config = self._get_role_config_values()
if config.get('mfa_serial') is not None:
# MFA would require getting a new TokenCode which would require
# prompting the user for a new token, so we use a different
# refresh_func.
refresh_func = create_mfa_serial_refresher()
else:
refresh_func = create_assume_role_refresher(
self._create_client_from_config(config),
self._assume_role_base_kwargs(config))
return RefreshableCredentials(
access_key=response['Credentials']['AccessKeyId'],
secret_key=response['Credentials']['SecretAccessKey'],
token=response['Credentials']['SessionToken'],
method=self.METHOD,
expiry_time=_parse_if_needed(
response['Credentials']['Expiration']),
refresh_using=refresh_func)
def _create_client_from_config(self, config):
source_cred_values = config['source_cred_values']
client = self._client_creator(
'sts', aws_access_key_id=source_cred_values['aws_access_key_id'],
aws_secret_access_key=source_cred_values['aws_secret_access_key'],
aws_session_token=source_cred_values.get('aws_session_token'),
)
return client
def _retrieve_temp_credentials(self):
logger.debug("Retrieving credentials via AssumeRole.")
config = self._get_role_config_values()
client = self._create_client_from_config(config)
assume_role_kwargs = self._assume_role_base_kwargs(config)
if assume_role_kwargs.get('RoleSessionName') is None:
role_session_name = 'AWS-CLI-session-%s' % (int(time.time()))
assume_role_kwargs['RoleSessionName'] = role_session_name
response = client.assume_role(**assume_role_kwargs)
creds = self._create_creds_from_response(response)
return creds, response
def _assume_role_base_kwargs(self, config):
assume_role_kwargs = {'RoleArn': config['role_arn']}
if config['external_id'] is not None:
assume_role_kwargs['ExternalId'] = config['external_id']
if config['mfa_serial'] is not None:
token_code = self._prompter("Enter MFA code: ")
assume_role_kwargs['SerialNumber'] = config['mfa_serial']
assume_role_kwargs['TokenCode'] = token_code
if config['role_session_name'] is not None:
assume_role_kwargs['RoleSessionName'] = config['role_session_name']
return assume_role_kwargs
class CredentialResolver(object):
def __init__(self, providers):
"""
:param providers: A list of ``CredentialProvider`` instances.
"""
self.providers = providers
def insert_before(self, name, credential_provider):
"""
Inserts a new instance of ``CredentialProvider`` into the chain that will
be tried before an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials before. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
:param cred_instance: An instance of the new ``Credentials`` object
you'd like to add to the chain.
:type cred_instance: A subclass of ``Credentials``
"""
try:
offset = [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
self.providers.insert(offset, credential_provider)
def insert_after(self, name, credential_provider):
"""
Inserts a new type of ``Credentials`` instance into the chain that will
be tried after an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials after. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
:param cred_instance: An instance of the new ``Credentials`` object
you'd like to add to the chain.
:type cred_instance: A subclass of ``Credentials``
"""
offset = self._get_provider_offset(name)
self.providers.insert(offset + 1, credential_provider)
def remove(self, name):
"""
Removes a given ``Credentials`` instance from the chain.
:param name: The short name of the credentials instance to remove.
:type name: string
"""
available_methods = [p.METHOD for p in self.providers]
if not name in available_methods:
# It's not present. Fail silently.
return
offset = available_methods.index(name)
self.providers.pop(offset)
def get_provider(self, name):
"""Return a credential provider by name.
:type name: str
:param name: The name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
return self.providers[self._get_provider_offset(name)]
def _get_provider_offset(self, name):
try:
return [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import inspect
from bisect import bisect
from collections import defaultdict, OrderedDict
import six
from itertools import chain
from .exceptions import ObjectDoesNotExist, FieldError, ModelError
from .helper import subclass_exception
from .connection import Connection
from .query import SpannerQuerySet
from .sql import v1 as sql_v1
class SpannerModelRegistry(object):
"""
Helper class that validates SpannerModels and helps with global operations on all registered models
(e.g. create/drop tables).
"""
registered_models = OrderedDict()
@classmethod
def register(cls, spanner_class):
"""
:type spanner_class: SpannerModelBase
:param spanner_class:
"""
assert isinstance(spanner_class, SpannerModelBase)
# test for table name collisions
registered_class = cls.registered_models.get(spanner_class._meta.table)
if registered_class and registered_class != spanner_class:
raise ModelError("SpannerModel.meta.table collision: %s %s" % (registered_class, spanner_class))
cls.registered_models[spanner_class._meta.table] = spanner_class
@classmethod
def get_registered_models_prio_dict(cls):
prio_dict = defaultdict(list)
for class_instance in cls.registered_models.values():
init_prio = cls._get_prio(class_instance)
prio_dict[init_prio].append(class_instance)
return prio_dict
@classmethod
def get_registered_models_in_correct_order(cls):
prio_dict = cls.get_registered_models_prio_dict()
for i in range(0, 10):
for o in prio_dict[i]:
yield o
@staticmethod
def _get_prio(model_class, i=0):
while model_class._meta.parent:
i += 1
model_class = model_class._meta.parent
if i >= 9:
break
return i
@classmethod
def create_table_statements(cls):
ddl_statements = []
for spanner_class in cls.get_registered_models_in_correct_order():
builder = sql_v1.SQLTable(spanner_class)
ddl_statements.extend(builder.stmt_create())
return ddl_statements
@classmethod
def create_tables(cls, connection_id=None):
ddl_statements = cls.create_table_statements()
database = Connection.get(connection_id=connection_id)
database.update_ddl(ddl_statements=ddl_statements).result()
@classmethod
def delete_table_statements(cls):
ddl_statements = []
for spanner_class in cls.get_registered_models_in_correct_order():
builder = sql_v1.SQLTable(spanner_class)
ddl_statements.extend(builder.stmt_delete())
return ddl_statements
@classmethod
def drop_tables(cls, connection_id=None):
ddl_statements = cls.drop_table_statements()
database = Connection.get(connection_id=connection_id)
database.update_ddl(ddl_statements=ddl_statements).result()
def register():
"""
Decorator that registers a spanner model in the registry.
"""
def _model_admin_wrapper(spanner_class):
SpannerModelRegistry.register(spanner_class)
return spanner_class
return _model_admin_wrapper
DEFAULT_NAMES = ('table', 'pk', 'parent', 'parent_on_delete', 'indices', 'indices_inherit', 'abstract')
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class SpannerModelMeta(object):
def __init__(self, meta):
self.model = None
self.meta = meta
self.table = ''
self.local_fields = []
self.field_lookup = {}
self.pk = []
self.primary = None
self.indices = []
self.index_lookup = {}
self.inherit_indices = True
self.parent = None
self.parent_on_delete = 'CASCADE'
self.abstract = False
def contribute_to_class(self, cls, name):
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
del self.meta
if not self.abstract and not self.table:
raise ValueError("%s must define a Meta.table!" % cls)
def add_field(self, field, check_if_already_added=False):
if check_if_already_added and self.field_lookup.get(field.name):
return
self.field_lookup[field.name] = field
self.local_fields.insert(bisect(self.local_fields, field), field)
def add_index(self, index):
from .indices import SpannerIndex, PrimaryKey
assert isinstance(index, SpannerIndex)
if self.index_lookup.get(index.name):
return
self.index_lookup[index.name] = index
# don't assign the primary key to the indices list
if isinstance(index, PrimaryKey):
self.primary = index
else:
self.indices.append(index)
def interleave_with_parent(self, parent):
# set parent on model meta
self.parent = parent
# copy all primary key field names that needs to be copied to this model
for field in parent._meta.primary.get_field_names():
new_field = copy.deepcopy(parent._meta.field_lookup[field])
new_field.contribute_to_class(self.model, new_field.name, check_if_already_added=True)
# add parent primary
self.primary.set_parent(parent._meta.primary)
def get_fields(self):
return self.local_fields
def _prepare(cls, model):
pass
class SpannerModelBase(type):
"""
Metaclass for all models, heavily borrowed from Django's awesome ORM.
Gosh, this "thin SQL wrapper for Cloud Spanner" is really getting out of hand... FML -.-
"""
# fixme: implement proxy model (later)
def __new__(cls, name, bases, attrs):
super_new = super(SpannerModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, SpannerModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
new_class.add_to_class('_meta', SpannerModelMeta(meta))
# add DoesNotExist exception
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
all_fields = chain(
new_class._meta.local_fields
)
# All the index_fields of any type declared on this model
field_names = {f.name for f in all_fields}
# interleave parent
interleave_with = new_class._meta.parent
# add indices to class
for index in new_class._meta.indices:
new_index = index
new_class.add_to_class(new_index.name, new_index)
# Do the appropriate setup for any model parents.
pk_inherited_from_parent = False
for base in parents:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
# only allow one _meta.parent!
if interleave_with and base._meta.parent and interleave_with != base._meta.parent:
raise ValueError("%s may only have one Meta.parent, found different parent in %s!" % (new_class, base))
if not interleave_with:
interleave_with = base._meta.parent
# add class parent's fields to model
parent_fields = base._meta.local_fields
for field in parent_fields:
# Check for clashes between locally declared index_fields and those
# on the base classes (we cannot handle shadowed index_fields at the
# moment).
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# assign first parent's pk if this class has no pk defined
# make sure that our parent's only have one primary key, otherwise we run into problems.
if base._meta.pk and pk_inherited_from_parent:
raise ModelError("%s may only inherit one primary key!" % new_class)
if base._meta.pk and not new_class._meta.pk:
new_class._meta.pk = base._meta.pk
pk_inherited_from_parent = True
# copy parent's indices if this class defines to indices.
if new_class._meta.inherit_indices:
for index in base._meta.indices:
new_index = copy.deepcopy(index)
new_class.add_to_class(new_index.name, new_index)
# convert primary key field list/tuple to PrimaryKey index
if isinstance(new_class._meta.pk, (list, tuple)):
from .indices import PrimaryKey
new_index = PrimaryKey(fields=new_class._meta.pk)
new_class.add_to_class(new_index.name, new_index)
# interleave table with parent table (also copies&prepends _meta.pk index_fields from parent)
if interleave_with:
new_class._meta.interleave_with_parent(interleave_with)
# register class in registry (if not abstract)
if not new_class._meta.abstract:
SpannerModelRegistry.register(new_class)
setattr(new_class, 'objects', SpannerQuerySet(new_class))
new_class._prepare()
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
@six.python_2_unicode_compatible
class SpannerModel(six.with_metaclass(SpannerModelBase)):
# default queryset to shut automatic code analysis up
objects = SpannerQuerySet()
def __init__(self, *args, **kwargs):
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.local_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of index_fields")
if not kwargs:
fields_iter = iter(self._meta.local_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(self._meta.local_fields)
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed index_fields that *must* come from
# keywords, or default.
for field in fields_iter:
if kwargs:
try:
val = kwargs.pop(field.name)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
setattr(self, field.name, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(SpannerModel, self).__init__()
def __str__(self):
return '%s<%s>' % (self._class__, self._meta.table)
@classmethod
def from_db(cls, db, field_names, values):
new = cls(**dict(zip(field_names, values)))
new._state.adding = False
new._state.db = db
return new
@classmethod
def get_table_name(cls):
if cls.Meta.table:
return cls.Meta.table
else:
c = cls.__mro__[0]
name = c.__module__ + "." + c.__name__
return name
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.local_fields:
field_names.add(field.name)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
def save_base(self, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
"""
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
meta = cls._meta
updated = self._save_table(cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
def _save_table(self, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
pk_val = self._get_pk_val(meta)
non_pks = [f for f in meta.local_fields if f.name not in pk_val['keys']]
if update_fields:
non_pks = [f for f in non_pks if f.name in update_fields]
pk_set = not bool(pk_val['missing'])
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
# UPDATE
if pk_set and not force_insert:
forced_update = update_fields or force_update
updated = self._do_update(using, pk_val, non_pks, forced_update)
if force_update and not updated:
raise ModelError("Forced update did not affect any rows.")
# INSERT
if not updated:
# todo: support auto-generated pk values
if not pk_set:
raise ValueError("Can't insert value without primary key values! Missing: %s" % pk_val['missing'])
self._do_insert(using, pk_val, non_pks)
return updated
def _do_update(self, using, pk_val, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
database = Connection.get(connection_id=using)
with database.batch() as batch:
# add primary keys to the updated columns
columns = [pk_val['keys']] + [f.name for f in update_fields]
result = batch.update(
table=self._meta.table,
columns=columns,
values=pk_val['values'] + [f.to_db(self, False) for f in update_fields]
)
print (result)
return True
def _do_insert(self, using, pk_val, update_fields):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
database = Connection.get(connection_id=using)
with database.batch() as batch:
# add primary keys to the updated columns
columns = [pk_val['keys']] + [f.name for f in self.meta.local_fields]
result = batch.insert(
table=self._meta.table,
columns=columns,
values=pk_val['values'] + [f.to_db(self, True) for f in update_fields]
)
pass
def delete(self, using=None, keep_parents=False):
pk_val = self._get_pk_val()
assert pk_val['missing'] is False, (
"%s object can't be deleted because primary keys are missing: %s." %
(self._meta.object_name, pk_val['missing'])
)
database = Connection.get(connection_id=using)
with database.batch() as batch:
batch.delete(
table=self._meta.table,
columns=pk_val['keys'],
values=pk_val['values']
)
return True
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
keys = meta.primary.fields
values = [getattr(self, k) for k in keys]
pk_data = {
'keys': set(keys),
'values': values,
'missing': [keys[i] for i, v in enumerate(values) if not v],
}
return pk_data
|
|
import re
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from shapely.wkt import loads as load_wkt
from shapely import affinity
import numpy as np
from simdna.simulations import loaded_motifs
from pkg_resources import resource_filename
##########################################################################
# copied from descartes
# https://pypi.python.org/pypi/descartes
class Polygon(object):
# Adapt Shapely or GeoJSON/geo_interface polygons to a common interface
def __init__(self, context):
if hasattr(context, 'interiors'):
self.context = context
else:
self.context = getattr(context, '__geo_interface__', context)
@property
def geom_type(self):
return (
getattr(self.context, 'geom_type', None) or self.context['type'])
@property
def exterior(self):
return (
getattr(self.context, 'exterior', None) or self.context['coordinates'][0])
@property
def interiors(self):
value = getattr(self.context, 'interiors', None)
if value is None:
value = self.context['coordinates'][1:]
return value
def PolygonPath(polygon):
"""Constructs a compound matplotlib path from a Shapely or GeoJSON-like
geometric object"""
this = Polygon(polygon)
assert this.geom_type == 'Polygon'
def coding(ob):
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(getattr(ob, 'coords', None) or ob)
vals = np.ones(n, dtype=Path.code_type) * Path.LINETO
vals[0] = Path.MOVETO
return vals
vertices = np.concatenate(
[np.asarray(this.exterior)] + [np.asarray(r)
for r in this.interiors])
codes = np.concatenate(
[coding(this.exterior)] + [coding(r)
for r in this.interiors])
return Path(vertices, codes)
def PolygonPatch(polygon, **kwargs):
"""Constructs a matplotlib patch from a geometric object
The `polygon` may be a Shapely
or GeoJSON-like object with or without holes.
The `kwargs` are those supported by the matplotlib.patches.Polygon class
constructor. Returns an instance of matplotlib.patches.PathPatch.
Example (using Shapely Point and a matplotlib axes):
>>> b = Point(0, 0).buffer(1.0)
>>> patch = PolygonPatch(b, fc='blue', ec='blue', alpha=0.5)
>>> axis.add_patch(patch)
"""
return PathPatch(PolygonPath(polygon), **kwargs)
#
# END copied from descartes
#
##########################################################################
##########################################################################
# Initialize the polygon paths for A,C,G,T
#
# Geometry taken from JTS TestBuilder Monospace font with fixed precision model
# of 1000.0
#
A_data = """
MULTIPOLYGON (
((24.7631 57.3346, 34.3963 57.3346, 52.391 -1.422, 44.1555 -1.422, 39.8363
13.8905, 19.2476 13.8905, 15.0039 -1.422, 6.781 -1.422, 24.7631 57.3346)),
((29.5608 50.3205, 21.1742 20.2623, 37.9474 20.2623, 29.5608 50.3205))
)
"""
C_data = """POLYGON((
52.391 2.5937, 48.5882 0.8417, 44.68 -0.4142, 40.5998 -1.17, 36.2814 -1.422,
32.8755 -1.2671, 29.6656 -0.8024, 26.6518 -0.0278, 23.834 1.0565,
21.2122 2.4507, 18.7865 4.1547, 16.5569 6.1686, 14.5233 8.4922,
12.7087 11.0966, 11.136 13.9527, 9.8053 17.0606, 8.7166 20.4201,
7.8698 24.0314, 7.2649 27.8943, 6.902 32.009, 6.781 36.3754, 6.9027 40.7209,
7.2678 44.8198, 7.8764 48.6722, 8.7283 52.278, 9.8236 55.6371,
11.1624 58.7497, 12.7446 61.6157, 14.5702 64.2351, 16.6133 66.5753,
18.8481 68.6034, 21.2745 70.3195, 23.8926 71.7235, 26.7023 72.8156,
29.7037 73.5956, 32.8967 74.0637, 36.2814 74.2197, 40.5998 73.9697,
44.68 73.2196, 48.5882 71.9696, 52.391 70.2196, 52.391 60.1101,
48.6468 62.739, 44.6331 64.657, 40.4709 65.8289, 36.2814 66.2196,
31.7716 65.7557, 29.7437 65.1758, 27.8672 64.3641, 26.1421 63.3203,
24.5684 62.0447, 23.146 60.5371, 21.875 58.7976, 19.7831 54.6129,
18.289 49.481, 17.3925 43.4019, 17.0936 36.3754, 17.3925 29.3763,
18.289 23.3166, 19.7831 18.1964, 21.875 14.0157, 23.146 12.2762,
24.5684 10.7686, 26.1421 9.4929, 27.8672 8.4492, 29.7437 7.6375,
31.7716 7.0576, 36.2814 6.5937, 40.5354 6.9844, 44.7034 8.1563,
48.6878 10.0743, 52.391 12.7032, 52.391 2.5937))"""
G_data = """POLYGON((
52.391 5.4974, 50.49 3.8964, 48.4724 2.502, 46.3383 1.3144, 44.0877 0.3334,
41.7314 -0.4346, 39.2805 -0.9832, 34.0946 -1.422, 30.9504 -1.2772,
27.9859 -0.843, 25.2009 -0.1191, 22.5956 0.8942, 20.1698 2.197,
17.9236 3.7894, 15.857 5.6713, 13.9699 7.8428, 12.285 10.2753,
10.8248 12.9404, 9.5892 15.8381, 8.5782 18.9685, 7.7919 22.3315,
7.2303 25.9271, 6.8933 29.7553, 6.781 33.8161, 6.8948 37.8674,
7.2362 41.6888, 7.8053 45.2803, 8.6019 48.6419, 9.6262 51.7737, 10.878 54.6755,
12.3575 57.3474, 14.0646 59.7895, 15.9743 61.9712, 18.0615 63.862,
20.3262 65.4618, 22.7685 66.7708, 25.3884 67.789, 28.1857 68.5162,
31.1606 68.9525, 34.3131 69.098, 38.5048 68.7957, 42.5144 67.8889,
46.3638 66.3703, 50.0748 64.2325, 50.0748 54.8075, 46.342 57.8466,
42.5144 59.9716, 38.5266 61.2226, 34.3131 61.6395, 30.1132 61.2053,
28.2228 60.6624, 26.4723 59.9024, 24.8614 58.9253, 23.3904 57.731,
22.0591 56.3195, 20.8675 54.691, 18.9046 50.7806, 17.5025 45.998,
16.6612 40.3432, 16.3808 33.8161, 16.6526 27.1962, 17.4679 21.4959,
18.8267 16.7151, 20.7291 12.8539, 21.8892 11.2595, 23.1951 9.8776,
24.6469 8.7084, 26.2446 7.7517, 27.9883 7.0076, 29.8778 6.4762, 34.0946 6.051,
36.9534 6.2276, 39.4407 6.7575, 41.6331 7.6625, 43.607 8.9644, 43.607 27.2172,
33.7304 27.2172, 33.7304 34.7776, 52.391 34.7776, 52.391 5.4974
))"""
T_data = """POLYGON((
6.781 58.3746, 52.391 58.3746, 52.391 51.5569, 33.6933 51.5569, 33.6933 -1.422,
25.5684 -1.422, 25.5684 51.5569, 6.781 51.5569, 6.781 58.3746
))"""
def standardize_polygons_str(data_str):
"""Given a POLYGON string, standardize the coordinates to a 1x1 grid.
Input : data_str (taken from above)
Output: tuple of polygon objects
"""
# find all of the polygons in the letter (for instance an A
# needs to be constructed from 2 polygons)
path_strs = re.findall("\(\(([^\)]+?)\)\)", data_str.strip())
# convert the data into a numpy array
polygons_data = []
for path_str in path_strs:
data = np.array([
tuple(map(float, x.split())) for x in path_str.strip().split(",")])
polygons_data.append(data)
# standardize the coordinates
min_coords = np.vstack(data.min(0) for data in polygons_data).min(0)
max_coords = np.vstack(data.max(0) for data in polygons_data).max(0)
for data in polygons_data:
data[:, ] -= min_coords
data[:, ] /= (max_coords - min_coords)
polygons = []
for data in polygons_data:
polygons.append(load_wkt(
"POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))
return tuple(polygons)
letters_polygons = {}
letters_polygons['A'] = standardize_polygons_str(A_data)
letters_polygons['C'] = standardize_polygons_str(C_data)
letters_polygons['G'] = standardize_polygons_str(G_data)
letters_polygons['T'] = standardize_polygons_str(T_data)
colors = dict(zip(
'ACGT', (('green', 'white'), ('blue',), ('orange',), ('red',))
))
def add_letter_to_axis(ax, let, x, y, height):
"""Add 'let' with position x,y and height height to matplotlib axis 'ax'.
"""
for polygon, color in zip(letters_polygons[let], colors[let]):
new_polygon = affinity.scale(
polygon, yfact=height, origin=(0, 0, 0))
new_polygon = affinity.translate(
new_polygon, xoff=x, yoff=y)
patch = PolygonPatch(
new_polygon, edgecolor=color, facecolor=color)
ax.add_patch(patch)
return
def plot_bases_on_ax(letter_heights, ax):
"""
Plot the N letters with heights taken from the Nx4 matrix letter_heights.
Parameters
----------
letter_heights: Nx4 array
ax: axis to plot on
"""
assert letter_heights.shape[1] == 4, letter_heights.shape
for x_pos, heights in enumerate(letter_heights):
letters_and_heights = sorted(zip(heights, 'ACGT'))
y_pos_pos = 0.0
y_neg_pos = 0.0
for height, letter in letters_and_heights:
if height > 0:
add_letter_to_axis(ax, letter, 0.5 + x_pos, y_pos_pos, height)
y_pos_pos += height
elif height < 0:
add_letter_to_axis(ax, letter, 0.5 + x_pos, y_neg_pos, height)
y_neg_pos += height
ax.set_xlim(0, letter_heights.shape[0] + 1)
ax.set_xticks(np.arange(1, letter_heights.shape[0] + 1))
ax.set_aspect(aspect='auto', adjustable='box')
ax.autoscale_view()
def plot_bases(letter_heights, figsize=(12, 6), ylab='bits'):
"""
Plot the N letters with heights taken from the Nx4 matrix letter_heights.
Parameters
----------
letter_heights: Nx4 array
ylab: y axis label
Returns
-------
pyplot figure
"""
assert letter_heights.shape[1] == 4, letter_heights.shape
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.set_xlabel('pos')
ax.set_ylabel(ylab)
plot_bases_on_ax(letter_heights, ax)
return fig
def plot_pwm(letter_heights,
figsize=(12, 6), ylab='bits', information_content=True):
"""
Plots pwm. Displays information content by default.
"""
if information_content:
letter_heights = letter_heights * (
2 + (letter_heights *
np.log2(letter_heights)).sum(axis=1))[:, np.newaxis]
return plot_bases(letter_heights, figsize, ylab=ylab)
def plot_motif(motif_name, figsize, ylab='bits', information_content=True):
"""
Plot motifs from encode motifs file
"""
motif_letter_heights = loaded_motifs.getPwm(motif_name).getRows()
return plot_pwm(motif_letter_heights, figsize,
ylab=ylab, information_content=information_content)
def add_letters_to_axis(ax, letter_heights):
"""
Plots letter on user-specified axis.
Parameters
----------
ax : axis
letter_heights: Nx4 array
"""
assert letter_heights.shape[1] == 4
x_range = [1, letter_heights.shape[0]]
pos_heights = np.copy(letter_heights)
pos_heights[letter_heights < 0] = 0
neg_heights = np.copy(letter_heights)
neg_heights[letter_heights > 0] = 0
for x_pos, heights in enumerate(letter_heights):
letters_and_heights = sorted(zip(heights, 'ACGT'))
y_pos_pos = 0.0
y_neg_pos = 0.0
for height, letter in letters_and_heights:
if height > 0:
add_letter_to_axis(ax, letter, 0.5 + x_pos, y_pos_pos, height)
y_pos_pos += height
else:
add_letter_to_axis(ax, letter, 0.5 + x_pos, y_neg_pos, height)
y_neg_pos += height
ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
ax.set_xticks(range(*x_range) + [x_range[-1]])
ax.set_aspect(aspect='auto', adjustable='box')
ax.autoscale_view()
|
|
#!/usr/bin/env python
def _complain_ifclosed(closed):
if closed:
raise ValueError, "I/O operation on closed file"
class StringIO:
"""class StringIO([buffer])
When a StringIO object is created, it can be initialized to an existing
string by passing the string to the constructor. If no string is given,
the StringIO will start empty.
The StringIO object can accept either Unicode or 8-bit strings, but
mixing the two may take some care. If both are used, 8-bit strings that
cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
a UnicodeError to be raised when getvalue() is called.
"""
def __init__(self, buf = ''):
# Force self.buf to be a string or unicode
if not isinstance(buf, str):
buf = str(buf)
self.buf = buf
self.len = len(buf)
self.buflist = []
self.pos = 0
self.closed = False
self.softspace = 0
def __iter__(self):
return self
def next(self):
"""A file object is its own iterator, for example iter(f) returns f
(unless f is closed). When a file is used as an iterator, typically
in a for loop (for example, for line in f: print line), the next()
method is called repeatedly. This method returns the next input line,
or raises StopIteration when EOF is hit.
"""
_complain_ifclosed(self.closed)
r = self.readline()
if not r:
raise StopIteration
return r
def close(self):
"""Free the memory buffer.
"""
if not self.closed:
self.closed = True
del self.buf
del self.pos
def isatty(self):
"""Returns False because StringIO objects are not connected to a
tty-like device.
"""
_complain_ifclosed(self.closed)
return False
def seek(self, pos, mode = 0):
"""Set the file's current position.
The mode argument is optional and defaults to 0 (absolute file
positioning); other values are 1 (seek relative to the current
position) and 2 (seek relative to the file's end).
There is no return value.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if mode == 1:
pos += self.pos
elif mode == 2:
pos += self.len
self.pos = max(0, pos)
def tell(self):
"""Return the file's current position."""
_complain_ifclosed(self.closed)
return self.pos
def read(self, n = -1):
"""Read at most size bytes from the file
(less if the read hits EOF before obtaining size bytes).
If the size argument is negative or omitted, read all data until EOF
is reached. The bytes are returned as a string object. An empty
string is returned when EOF is encountered immediately.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readline(self, length=None):
r"""Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent
when a file ends with an incomplete line). If the size argument is
present and non-negative, it is a maximum byte count (including the
trailing newline) and an incomplete line may be returned.
An empty string is returned only when EOF is encountered immediately.
Note: Unlike stdio's fgets(), the returned string contains null
characters ('\0') if they occurred in the input.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
i = self.buf.find('\n', self.pos)
if i < 0:
newpos = self.len
else:
newpos = i+1
if length is not None:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readlines(self, sizehint = 0):
"""Read until EOF using readline() and return a list containing the
lines thus read.
If the optional sizehint argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes (or more
to accommodate a final whole line).
"""
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def truncate(self, size=None):
"""Truncate the file's size.
If the optional size argument is present, the file is truncated to
(at most) that size. The size defaults to the current position.
The current file position is not changed unless the position
is beyond the new file size.
If the specified size exceeds the file's current size, the
file remains unchanged.
"""
_complain_ifclosed(self.closed)
if size is None:
size = self.pos
elif size < 0:
raise IOError(EINVAL, "Negative size not allowed")
elif size < self.pos:
self.pos = size
self.buf = self.getvalue()[:size]
self.len = size
def write(self, s):
"""Write a string to the file.
There is no return value.
"""
_complain_ifclosed(self.closed)
if not s: return
# Force s to be a string or unicode
if not isinstance(s, str):
s = str(s)
spos = self.pos
slen = self.len
if spos == slen:
self.buflist.append(s)
self.len = self.pos = spos + len(s)
return
if spos > slen:
self.buflist.append('\0'*(spos - slen))
slen = spos
newpos = spos + len(s)
if spos < slen:
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
self.buf = ''
if newpos > slen:
slen = newpos
else:
self.buflist.append(s)
slen = newpos
self.len = slen
self.pos = newpos
def writelines(self, iterable):
"""Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. There
is no return value.
(The name is intended to match readlines(); writelines() does not add
line separators.)
"""
write = self.write
for line in iterable:
write(line)
def flush(self):
"""Flush the internal buffer
"""
_complain_ifclosed(self.closed)
def getvalue(self):
"""
Retrieve the entire contents of the "file" at any time before
the StringIO object's close() method is called.
The StringIO object can accept either Unicode or 8-bit strings,
but mixing the two may take some care. If both are used, 8-bit
strings that cannot be interpreted as 7-bit ASCII (that use the
8th bit) will cause a UnicodeError to be raised when getvalue()
is called.
"""
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
return self.buf
# A little test suite
def test():
#import sys
#if sys.argv[1:]:
# file = sys.argv[1]
#else:
import os
open = os.File
file = 'example'
lines = open(file, 'r').readlines()
text = open(file, 'r').read()
f = StringIO()
for line in lines[:-2]:
f.write(line)
f.writelines(lines[-2:])
if f.getvalue() != text:
raise RuntimeError('write failed', f.getvalue(), text)
length = f.tell()
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line =', repr(f.readline())
print 'Position =', f.tell()
line = f.readline()
print 'Second line =', repr(line)
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
raise RuntimeError, 'bad result after seek back'
f.seek(len(line2), 1)
list = f.readlines()
line = list[-1]
f.seek(f.tell() - len(line))
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.truncate(length/2)
f.seek(0, 2)
print 'Truncated length =', f.tell()
if f.tell() != length/2:
raise RuntimeError, 'truncate did not adjust length'
f.close()
#if __name__ == '__main__':
# test()
|
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
import GafferScene
import _GafferSceneUI
##########################################################################
# SceneHierarchy
##########################################################################
class SceneHierarchy( GafferUI.NodeSetEditor ) :
def __init__( self, scriptNode, **kw ) :
column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, borderWidth = 8, spacing = 4 )
GafferUI.NodeSetEditor.__init__( self, column, scriptNode, **kw )
searchFilter = _GafferSceneUI._SceneHierarchySearchFilter()
setFilter = _GafferSceneUI._SceneHierarchySetFilter()
setFilter.setEnabled( False )
self.__filter = Gaffer.CompoundPathFilter( [ searchFilter, setFilter ] )
with column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
_SearchFilterWidget( searchFilter )
_SetFilterWidget( setFilter )
self.__pathListing = GafferUI.PathListingWidget(
Gaffer.DictPath( {}, "/" ), # temp till we make a ScenePath
columns = [ GafferUI.PathListingWidget.defaultNameColumn ],
allowMultipleSelection = True,
displayMode = GafferUI.PathListingWidget.DisplayMode.Tree,
)
self.__pathListing.setDragPointer( "objects" )
self.__pathListing.setSortable( False )
self.__selectionChangedConnection = self.__pathListing.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__selectionChanged ) )
self.__expansionChangedConnection = self.__pathListing.expansionChangedSignal().connect( Gaffer.WeakMethod( self.__expansionChanged ) )
self.__plug = None
self._updateFromSet()
def __repr__( self ) :
return "GafferSceneUI.SceneHierarchy( scriptNode )"
def _updateFromSet( self ) :
# first of all decide what plug we're viewing.
self.__plug = None
self.__plugParentChangedConnection = None
node = self._lastAddedNode()
if node is not None :
outputScenePlugs = [ p for p in node.children( GafferScene.ScenePlug ) if p.direction() == Gaffer.Plug.Direction.Out ]
if len( outputScenePlugs ) :
self.__plug = outputScenePlugs[0]
self.__plugParentChangedConnection = self.__plug.parentChangedSignal().connect( Gaffer.WeakMethod( self.__plugParentChanged ) )
# call base class update - this will trigger a call to _titleFormat(),
# hence the need for already figuring out the plug.
GafferUI.NodeSetEditor._updateFromSet( self )
# update our view of the hierarchy
self.__setPathListingPath()
def _updateFromContext( self, modifiedItems ) :
if "ui:scene:selectedPaths" in modifiedItems :
self.__transferSelectionFromContext()
elif "ui:scene:expandedPaths" in modifiedItems :
self.__transferExpansionFromContext()
for item in modifiedItems :
if not item.startswith( "ui:" ) :
# When the context has changed, the hierarchy of the scene may
# have too so we should update our PathListingWidget.
self.__setPathListingPath()
break
def _titleFormat( self ) :
return GafferUI.NodeSetEditor._titleFormat(
self,
_maxNodes = 1 if self.__plug is not None else 0,
_reverseNodes = True,
_ellipsis = False
)
def __plugParentChanged( self, plug, oldParent ) :
# the plug we were viewing has been deleted or moved - find
# another one to view.
self._updateFromSet()
@GafferUI.LazyMethod( deferUntilPlaybackStops = True )
def __setPathListingPath( self ) :
for f in self.__filter.getFilters() :
f.setScene( self.__plug )
if self.__plug is not None :
# We take a static copy of our current context for use in the ScenePath - this prevents the
# PathListing from updating automatically when the original context changes, and allows us to take
# control of updates ourselves in _updateFromContext(), using LazyMethod to defer the calls to this
# function until we are visible and playback has stopped.
contextCopy = Gaffer.Context( self.getContext() )
for f in self.__filter.getFilters() :
f.setContext( contextCopy )
self.__pathListing.setPath( GafferScene.ScenePath( self.__plug, contextCopy, "/", filter = self.__filter ) )
self.__transferExpansionFromContext()
self.__transferSelectionFromContext()
else :
self.__pathListing.setPath( Gaffer.DictPath( {}, "/" ) )
def __expansionChanged( self, pathListing ) :
assert( pathListing is self.__pathListing )
paths = pathListing.getExpandedPaths()
paths = IECore.StringVectorData( [ "/" ] + [ str( path ) for path in paths ] )
pathMatcherData = GafferScene.PathMatcherData()
pathMatcherData.value.init( paths )
with Gaffer.BlockedConnection( self._contextChangedConnection() ) :
self.getContext().set( "ui:scene:expandedPaths", pathMatcherData )
def __selectionChanged( self, pathListing ) :
assert( pathListing is self.__pathListing )
paths = pathListing.getSelectedPaths()
paths = IECore.StringVectorData( [ str( path ) for path in paths ] )
with Gaffer.BlockedConnection( self._contextChangedConnection() ) :
self.getContext().set( "ui:scene:selectedPaths", paths )
@GafferUI.LazyMethod( deferUntilPlaybackStops = True )
def __transferExpansionFromContext( self ) :
expandedPaths = self.getContext().get( "ui:scene:expandedPaths", None )
if expandedPaths is None :
return
p = self.__pathListing.getPath()
expandedPaths = [ p.copy().setFromString( s ) for s in expandedPaths.value.paths() ]
with Gaffer.BlockedConnection( self.__expansionChangedConnection ) :
self.__pathListing.setExpandedPaths( expandedPaths )
@GafferUI.LazyMethod( deferUntilPlaybackStops = True )
def __transferSelectionFromContext( self ) :
selection = self.getContext()["ui:scene:selectedPaths"]
with Gaffer.BlockedConnection( self.__selectionChangedConnection ) :
## \todo Qt is dog slow with large non-contiguous selections,
# so we're only mirroring single selections currently. Rewrite
# PathListingWidget so it manages selection itself using a PathMatcher
# and we can refer to the same data structure everywhere, and reenable
# mirroring of multi-selection.
if len( selection ) == 1 :
p = self.__pathListing.getPath()
selection = [ p.copy().setFromString( s ) for s in selection ]
self.__pathListing.setSelectedPaths( selection, scrollToFirst=True, expandNonLeaf=False )
else :
self.__pathListing.setSelectedPaths( [] )
GafferUI.EditorWidget.registerType( "SceneHierarchy", SceneHierarchy )
##########################################################################
# _SetFilterWidget
##########################################################################
class _SetFilterWidget( GafferUI.PathFilterWidget ) :
def __init__( self, pathFilter ) :
button = GafferUI.MenuButton(
"Sets",
menu = GafferUI.Menu(
Gaffer.WeakMethod( self.__setsMenuDefinition ),
title = "Set Filter"
)
)
GafferUI.PathFilterWidget.__init__( self, button, pathFilter )
def _updateFromPathFilter( self ) :
pass
def __setsMenuDefinition( self ) :
m = IECore.MenuDefinition()
availableSets = set()
if self.pathFilter().getScene() is not None :
with self.pathFilter().getContext() :
availableSets.update( str( s ) for s in self.pathFilter().getScene()["setNames"].getValue() )
builtInSets = { "__lights", "__cameras", "__coordinateSystems" }
selectedSets = set( self.pathFilter().getSetNames() )
m.append( "/Enabled", { "checkBox" : self.pathFilter().getEnabled(), "command" : Gaffer.WeakMethod( self.__toggleEnabled ) } )
m.append( "/EnabledDivider", { "divider" : True } )
m.append(
"/All", {
"active" : self.pathFilter().getEnabled() and selectedSets.issuperset( availableSets ),
"checkBox" : selectedSets.issuperset( availableSets ),
"command" : functools.partial( Gaffer.WeakMethod( self.__setSets ), builtInSets | availableSets | selectedSets )
}
)
m.append(
"/None", {
"active" : self.pathFilter().getEnabled() and len( selectedSets ),
"checkBox" : not len( selectedSets ),
"command" : functools.partial( Gaffer.WeakMethod( self.__setSets ), set() )
}
)
m.append( "/AllDivider", { "divider" : True } )
def item( setName ) :
updatedSets = set( selectedSets )
if setName in updatedSets :
updatedSets.remove( setName )
else :
updatedSets.add( setName )
return {
"active" : self.pathFilter().getEnabled() and s in availableSets,
"checkBox" : s in selectedSets,
"command" : functools.partial( Gaffer.WeakMethod( self.__setSets ), updatedSets )
}
for s in sorted( builtInSets ) :
m.append(
"/%s" % IECore.CamelCase.toSpaced( s[2:] ),
item( s )
)
if len( availableSets - builtInSets ) :
m.append( "/BuiltInDivider", { "divider" : True } )
for s in sorted( availableSets | selectedSets ) :
if s in builtInSets :
continue
m.append( "/" + str( s ), item( s ) )
return m
def __toggleEnabled( self, *unused ) :
self.pathFilter().setEnabled( not self.pathFilter().getEnabled() )
def __setSets( self, sets, *unused ) :
self.pathFilter().setSetNames( sets )
##########################################################################
# _SearchFilterWidget
##########################################################################
class _SearchFilterWidget( GafferUI.PathFilterWidget ) :
def __init__( self, pathFilter ) :
self.__patternWidget = GafferUI.TextWidget()
GafferUI.PathFilterWidget.__init__( self, self.__patternWidget, pathFilter )
self.__patternWidget._qtWidget().setPlaceholderText( "Filter..." )
self.__patternWidgetEditingFinishedConnection = self.__patternWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__patternEditingFinished ) )
self._updateFromPathFilter()
def _updateFromPathFilter( self ) :
self.__patternWidget.setText( self.pathFilter().getMatchPattern() )
def __patternEditingFinished( self, widget ) :
self.pathFilter().setMatchPattern( self.__patternWidget.getText() )
|
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from binder.json import jsonloads, jsondumps
from .testapp.models import Animal, Caretaker, Zoo
class AnnotationTestCase(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
self.caretaker = Caretaker(name='carl')
self.caretaker.save()
self.zoo = Zoo(name='Apenheul')
self.zoo.save()
self.animal = Animal(name='Harambe', zoo=self.zoo, caretaker=self.caretaker)
self.animal.save()
def test_get_data(self):
res = self.client.get('/caretaker/{}/'.format(self.caretaker.pk))
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(data['data']['animal_count'], 1)
def test_add_animal(self):
animal = Animal(name='Bokito', zoo=self.zoo, caretaker=self.caretaker)
animal.save()
res = self.client.get('/caretaker/{}/'.format(self.caretaker.pk))
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(data['data']['animal_count'], 2)
def test_order_by_animal_count(self):
caretaker_2 = Caretaker(name='caretaker 2')
caretaker_2.save()
caretaker_3 = Caretaker(name='caretaker 3')
caretaker_3.save()
for i in range(3):
Animal(
name='animal 2 {}'.format(i),
zoo=self.zoo,
caretaker=caretaker_2,
).save()
for i in range(2):
Animal(
name='animal 3 {}'.format(i),
zoo=self.zoo,
caretaker=caretaker_3,
).save()
res = self.client.get('/caretaker/?order_by=-animal_count')
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
order = [ct['id'] for ct in data['data']]
self.assertEqual(order, [caretaker_2.pk, caretaker_3.pk, self.caretaker.pk])
self.assertEqual(data['data'][0]['best_animal'], 'animal 2 2')
def test_f_expression(self):
self.caretaker.ssn = 'blablabla'
self.caretaker.save()
res = self.client.get('/caretaker/{}/'.format(self.caretaker.pk))
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(data['data']['bsn'], 'blablabla')
def test_filter_on_animal_count(self):
caretaker_2 = Caretaker(name='caretaker 2')
caretaker_2.save()
caretaker_3 = Caretaker(name='caretaker 3')
caretaker_3.save()
for i in range(3):
Animal(
name='animal 2 {}'.format(i),
zoo=self.zoo,
caretaker=caretaker_2,
).save()
for i in range(2):
Animal(
name='animal 3 {}'.format(i),
zoo=self.zoo,
caretaker=caretaker_3,
).save()
res = self.client.get('/caretaker/?.animal_count=2')
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
order = [ct['id'] for ct in data['data']]
self.assertEqual(order, [caretaker_3.pk])
def test_filter_on_animal_count_nested(self):
caretaker_2 = Caretaker(name='caretaker 2')
caretaker_2.save()
caretaker_3 = Caretaker(name='caretaker 3')
caretaker_3.save()
for i in range(3):
Animal(
name='animal 2 {}'.format(i),
zoo=self.zoo,
caretaker=caretaker_2,
).save()
animal_pks = set()
for i in range(2):
animal = Animal(
name='animal 3 {}'.format(i),
zoo=self.zoo,
caretaker=caretaker_3,
)
animal.save()
animal_pks.add(animal.pk)
res = self.client.get('/animal/?.caretaker.animal_count=2')
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
pks = {a['id'] for a in data['data']}
self.assertEqual(pks, animal_pks)
def test_context_annotation(self):
zoo = Zoo(name='Apenheul')
zoo.save()
harambe = Animal(zoo=zoo, name='Harambe')
harambe.save()
bokito = Animal(zoo=zoo, name='Bokito')
bokito.save()
res = self.client.get('/animal/{}/'.format(self.animal.pk))
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(data['data']['name'], 'Harambe')
self.assertEqual(data['data']['prefixed_name'], 'Sir Harambe')
res = self.client.get('/animal/{}/?animal_name_prefix=Lady'.format(self.animal.pk))
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(data['data']['name'], 'Harambe')
self.assertEqual(data['data']['prefixed_name'], 'Lady Harambe')
def test_save_unriwttable_annotation(self):
res = self.client.put('/caretaker/%s/' % self.caretaker.pk, data=jsondumps({'animal_count': 2}), content_type='application/json')
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(data['_meta']['ignored_fields'], ['animal_count'])
self.assertEqual(data['animal_count'], 1)
class IncludeAnnotationsTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
self.caretaker = Caretaker(name='carl')
self.caretaker.save()
self.zoo = Zoo(name='Apenheul')
self.zoo.save()
self.animal = Animal(name='Harambe', zoo=self.zoo, caretaker=self.caretaker)
self.animal.save()
def test_include_one_annotation(self):
res = self.client.get(
'/caretaker/{}/?include_annotations=animal_count'.format(self.caretaker.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertIn('animal_count', data['data'])
self.assertNotIn('best_animal', data['data'])
self.assertNotIn('bsn', data['data'])
self.assertNotIn('last_present', data['data'])
self.assertNotIn('scary', data['data'])
def test_exclude_one_annotation(self):
res = self.client.get(
'/caretaker/{}/?include_annotations=*,-animal_count'.format(self.caretaker.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertNotIn('animal_count', data['data'])
self.assertIn('best_animal', data['data'])
self.assertIn('bsn', data['data'])
self.assertIn('last_present', data['data'])
self.assertNotIn('scary', data['data'])
def test_include_optional_annotation(self):
res = self.client.get(
'/caretaker/{}/?include_annotations=*,scary'.format(self.caretaker.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertIn('animal_count', data['data'])
self.assertIn('best_animal', data['data'])
self.assertIn('bsn', data['data'])
self.assertIn('last_present', data['data'])
self.assertIn('scary', data['data'])
def test_include_no_annotations(self):
res = self.client.get(
'/caretaker/{}/?include_annotations='.format(self.caretaker.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertNotIn('animal_count', data['data'])
self.assertNotIn('best_animal', data['data'])
self.assertNotIn('bsn', data['data'])
self.assertNotIn('last_present', data['data'])
self.assertNotIn('scary', data['data'])
def test_relation_include_one_annotation(self):
res = self.client.get(
'/animal/{}/?with=caretaker&include_annotations=caretaker(animal_count)'.format(self.animal.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertIn('animal_count', data['with']['caretaker'][0])
self.assertNotIn('best_animal', data['with']['caretaker'][0])
self.assertNotIn('bsn', data['with']['caretaker'][0])
self.assertNotIn('last_present', data['with']['caretaker'][0])
self.assertNotIn('scary', data['with']['caretaker'][0])
def test_relation_exclude_one_annotation(self):
res = self.client.get(
'/animal/{}/?with=caretaker&include_annotations=caretaker(*,-animal_count)'.format(self.animal.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertNotIn('animal_count', data['with']['caretaker'][0])
self.assertIn('best_animal', data['with']['caretaker'][0])
self.assertIn('bsn', data['with']['caretaker'][0])
self.assertIn('last_present', data['with']['caretaker'][0])
self.assertNotIn('scary', data['with']['caretaker'][0])
def test_relation_include_optional_annotation(self):
res = self.client.get(
'/animal/{}/?with=caretaker&include_annotations=caretaker(*,scary)'.format(self.animal.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertIn('animal_count', data['with']['caretaker'][0])
self.assertIn('best_animal', data['with']['caretaker'][0])
self.assertIn('bsn', data['with']['caretaker'][0])
self.assertIn('last_present', data['with']['caretaker'][0])
self.assertIn('scary', data['with']['caretaker'][0])
def test_relation_include_no_annotations(self):
res = self.client.get(
'/animal/{}/?with=caretaker&include_annotations=caretaker()'.format(self.animal.pk)
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertNotIn('animal_count', data['with']['caretaker'][0])
self.assertNotIn('best_animal', data['with']['caretaker'][0])
self.assertNotIn('bsn', data['with']['caretaker'][0])
self.assertNotIn('last_present', data['with']['caretaker'][0])
self.assertNotIn('scary', data['with']['caretaker'][0])
def test_filter_on_relation_with_include_annotations(self):
res = self.client.get(
'/caretaker/?include_annotations=scary&.animals.name=Harambe'
)
self.assertEqual(res.status_code, 200)
data = jsonloads(res.content)
self.assertEqual(len(data['data']), 1)
self.assertNotIn('animal_count', data['data'][0])
self.assertNotIn('best_animal', data['data'][0])
self.assertNotIn('bsn', data['data'][0])
self.assertNotIn('last_present', data['data'][0])
self.assertIn('scary', data['data'][0])
|
|
# -*- coding: utf-8 -*-
# Python module: ModbusClient class (Client ModBus/TCP class 1)
from . import constants as const
import re
import socket
import select
import struct
import random
class ModbusClient:
"""Client Modbus TCP"""
def __init__(self, host=None, port=None, unit_id=None, timeout=None,
debug=None, auto_open=None, auto_close=None):
"""Constructor
Modbus server params (host, port) can be set here or with host(), port()
functions. Same for debug option.
Use functions avoid to launch ValueError except if params is incorrect.
:param host: hostname or IPv4/IPv6 address server address (optional)
:type host: str
:param port: TCP port number (optional)
:type port: int
:param unit_id: unit ID (optional)
:type unit_id: int
:param timeout: socket timeout in seconds (optional)
:type timeout: float
:param debug: debug state (optional)
:type debug: bool
:param auto_open: auto TCP connect (optional)
:type auto_open: bool
:param auto_close: auto TCP close (optional)
:type auto_close: bool
:return: Object ModbusClient
:rtype: ModbusClient
:raises ValueError: if a set parameter value is incorrect
"""
# object vars
self.__hostname = "localhost"
self.__port = const.MODBUS_PORT
self.__unit_id = 1
self.__timeout = 30.0 # socket timeout
self.__debug = False # debug trace on/off
self.__auto_open = False # auto TCP connect
self.__auto_close = False # auto TCP close
self.__mode = const.MODBUS_TCP # default is Modbus/TCP
self.__sock = None # socket handle
self.__hd_tr_id = 0 # store transaction ID
self.__version = const.VERSION # version number
self.__last_error = const.MB_NO_ERR # last error code
self.__last_except = 0 # last expect code
# set host
if host:
if not self.host(host):
raise ValueError("host value error", host)
# set port
if port:
if not self.port(port):
raise ValueError("port value error")
# set unit_id
if unit_id:
if not self.unit_id(unit_id):
raise ValueError("unit_id value error")
# set timeout
if timeout:
if not self.timeout(timeout):
raise ValueError("timeout value error")
# set debug
if debug:
if not self.debug(debug):
raise ValueError("debug value error")
# set auto_open
if auto_open:
if not self.auto_open(auto_open):
raise ValueError("auto_open value error")
# set auto_close
if auto_close:
if not self.auto_close(auto_close):
raise ValueError("auto_close value error")
def version(self):
"""Get package version
:return: current version of the package (like "0.0.1")
:rtype: str
"""
return self.__version
def last_error(self):
"""Get last error code
:return: last error code
:rtype: int
"""
return self.__last_error
def last_except(self):
"""Get last except code
:return: last except code
:rtype: int
"""
return self.__last_except
def host(self, hostname=None):
"""Get or set host (IPv4/IPv6 or hostname like 'plc.domain.net')
:param hostname: hostname or IPv4/IPv6 address or None for get value
:type hostname: str or None
:returns: hostname or None if set fail
:rtype: str or None
"""
if (hostname is None) or (hostname is self.__hostname):
return self.__hostname
# when hostname change ensure old socket is close
self.close()
# IPv4 ?
if re.match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", hostname):
self.__hostname = hostname
return self.__hostname
# IPv6 ?
elif re.match("^(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}$", hostname):
self.__hostname = hostname
return self.__hostname
# DNS name ?
elif re.match("^[a-zA-Z\-\_][a-zA-Z0-9\.\-\_]+$", hostname):
self.__hostname = hostname
return self.__hostname
else:
return None
def port(self, port=None):
"""Get or set TCP port
:param port: TCP port number or None for get value
:type port: int or None
:returns: TCP port or None if set fail
:rtype: int or None
"""
if (port is None) or (port is self.__port):
return self.__port
# when port change ensure old socket is close
self.close()
# valid port ?
if (0 < int(port) < 65536):
self.__port = int(port)
return self.__port
else:
return None
def unit_id(self, unit_id=None):
"""Get or set unit ID field
:param unit_id: unit ID (0 to 255) or None for get value
:type unit_id: int or None
:returns: unit ID or None if set fail
:rtype: int or None
"""
if unit_id is None:
return self.__unit_id
if (0 <= int(unit_id) < 256):
self.__unit_id = int(unit_id)
return self.__unit_id
else:
return None
def timeout(self, timeout=None):
"""Get or set timeout field
:param timeout: socket timeout in seconds or None for get value
:type timeout: float or None
:returns: timeout or None if set fail
:rtype: float or None
"""
if timeout is None:
return self.__timeout
if (0 < float(timeout) < 3600):
self.__timeout = float(timeout)
return self.__timeout
else:
return None
def debug(self, state=None):
"""Get or set debug mode
:param state: debug state or None for get value
:type state: bool or None
:returns: debug state or None if set fail
:rtype: bool or None
"""
if state is None:
return self.__debug
self.__debug = bool(state)
return self.__debug
def auto_open(self, state=None):
"""Get or set automatic TCP connect mode
:param state: auto_open state or None for get value
:type state: bool or None
:returns: auto_open state or None if set fail
:rtype: bool or None
"""
if state is None:
return self.__auto_open
self.__auto_open = bool(state)
return self.__auto_open
def auto_close(self, state=None):
"""Get or set automatic TCP close mode (after each request)
:param state: auto_close state or None for get value
:type state: bool or None
:returns: auto_close state or None if set fail
:rtype: bool or None
"""
if state is None:
return self.__auto_close
self.__auto_close = bool(state)
return self.__auto_close
def mode(self, mode=None):
"""Get or set modbus mode (TCP or RTU)
:param mode: mode (MODBUS_TCP/MODBUS_RTU) to set or None for get value
:type mode: int
:returns: mode or None if set fail
:rtype: int or None
"""
if mode is None:
return self.__mode
if (mode == const.MODBUS_TCP or mode == const.MODBUS_RTU):
self.__mode = mode
return self.__mode
else:
return None
def open(self):
"""Connect to modbus server (open TCP connection)
:returns: connect status (True if open)
:rtype: bool
"""
# restart TCP if already open
if self.is_open():
self.close()
# init socket and connect
# list available sockets on the target host/port
# AF_xxx : AF_INET -> IPv4, AF_INET6 -> IPv6,
# AF_UNSPEC -> IPv6 (priority on some system) or 4
# list available socket on target host
for res in socket.getaddrinfo(self.__hostname, self.__port,
socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.__sock = socket.socket(af, socktype, proto)
except socket.error:
self.__sock = None
continue
try:
self.__sock.settimeout(self.__timeout)
self.__sock.connect(sa)
except socket.error:
self.__sock.close()
self.__sock = None
continue
break
# check connect status
if self.__sock is not None:
return True
else:
self.__last_error = const.MB_CONNECT_ERR
self.__debug_msg("connect error")
return False
def is_open(self):
"""Get status of TCP connection
:returns: status (True for open)
:rtype: bool
"""
return self.__sock is not None
def close(self):
"""Close TCP connection
:returns: close status (True for close/None if already close)
:rtype: bool or None
"""
if self.__sock:
self.__sock.close()
self.__sock = None
return True
else:
return None
def read_coils(self, bit_addr, bit_nb=1):
"""Modbus function READ_COILS (0x01)
:param bit_addr: bit address (0 to 65535)
:type bit_addr: int
:param bit_nb: number of bits to read (1 to 2000)
:type bit_nb: int
:returns: bits list or None if error
:rtype: list of bool or None
"""
# check params
if not (0 <= int(bit_addr) <= 65535):
self.__debug_msg("read_coils() : bit_addr out of range")
return None
if not (1 <= int(bit_nb) <= 2000):
self.__debug_msg("read_coils() : bit_nb out of range")
return None
if (int(bit_addr) + int(bit_nb)) > 65536:
self.__debug_msg("read_coils() : read after ad 65535")
return None
# build frame
tx_buffer = self._mbus_frame(const.READ_COILS,
struct.pack(">HH", bit_addr, bit_nb))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check min frame body size
if len(f_body) < 2:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_coils(): rx frame under min size")
self.close()
return None
# extract field "byte count"
rx_byte_count = struct.unpack("B", f_body[0:1])[0]
# frame with bits value -> bits[] list
f_bits = bytearray(f_body[1:])
# check rx_byte_count: match nb of bits request and check buffer size
if not ((rx_byte_count == int((bit_nb + 7) / 8)) and
(rx_byte_count == len(f_bits))):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_coils(): rx byte count mismatch")
self.close()
return None
# allocate a bit_nb size list
bits = [None] * bit_nb
# fill bits list with bit items
for i, item in enumerate(bits):
bits[i] = bool(f_bits[int(i / 8)] >> (i % 8) & 0x01)
# return bits list
return bits
def read_discrete_inputs(self, bit_addr, bit_nb=1):
"""Modbus function READ_DISCRETE_INPUTS (0x02)
:param bit_addr: bit address (0 to 65535)
:type bit_addr: int
:param bit_nb: number of bits to read (1 to 2000)
:type bit_nb: int
:returns: bits list or None if error
:rtype: list of bool or None
"""
# check params
if not (0 <= int(bit_addr) <= 65535):
self.__debug_msg("read_discrete_inputs() : bit_addr out of range")
return None
if not (1 <= int(bit_nb) <= 2000):
self.__debug_msg("read_discrete_inputs() : bit_nb out of range")
return None
if (int(bit_addr) + int(bit_nb)) > 65536:
self.__debug_msg("read_discrete_inputs() : read after ad 65535")
return None
# build frame
tx_buffer = self._mbus_frame(const.READ_DISCRETE_INPUTS,
struct.pack(">HH", bit_addr, bit_nb))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check min frame body size
if len(f_body) < 2:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_discrete_inputs(): rx frame under min size")
self.close()
return None
# extract field "byte count"
rx_byte_count = struct.unpack("B", f_body[0:1])[0]
# frame with bits value -> bits[] list
f_bits = bytearray(f_body[1:])
# check rx_byte_count: match nb of bits request and check buffer size
if not ((rx_byte_count == int((bit_nb + 7) / 8)) and
(rx_byte_count == len(f_bits))):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_discrete_inputs(): rx byte count mismatch")
self.close()
return None
# allocate a bit_nb size list
bits = [None] * bit_nb
# fill bits list with bit items
for i, item in enumerate(bits):
bits[i] = bool(f_bits[int(i / 8)] >> (i % 8) & 0x01)
# return bits list
return bits
def read_holding_registers(self, reg_addr, reg_nb=1):
"""Modbus function READ_HOLDING_REGISTERS (0x03)
:param reg_addr: register address (0 to 65535)
:type reg_addr: int
:param reg_nb: number of registers to read (1 to 125)
:type reg_nb: int
:returns: registers list or None if fail
:rtype: list of int or None
"""
# check params
if not (0 <= int(reg_addr) <= 65535):
self.__debug_msg(
"read_holding_registers() : reg_addr out of range")
return None
if not (1 <= int(reg_nb) <= 125):
self.__debug_msg("read_holding_registers() : reg_nb out of range")
return None
if (int(reg_addr) + int(reg_nb)) > 65536:
self.__debug_msg("read_holding_registers() : read after ad 65535")
return None
# build frame
tx_buffer = self._mbus_frame(const.READ_HOLDING_REGISTERS,
struct.pack(">HH", reg_addr, reg_nb))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check min frame body size
if len(f_body) < 2:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_holding_registers(): " +
"rx frame under min size")
self.close()
return None
# extract field "byte count"
rx_byte_count = struct.unpack("B", f_body[0:1])[0]
# frame with regs value
f_regs = f_body[1:]
# check rx_byte_count: match nb of bits request and check buffer size
if not ((rx_byte_count == 2 * reg_nb) and
(rx_byte_count == len(f_regs))):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg(
"read_holding_registers(): rx byte count mismatch")
self.close()
return None
# allocate a reg_nb size list
registers = [None] * reg_nb
# fill registers list with register items
for i, item in enumerate(registers):
registers[i] = struct.unpack(">H", f_regs[i * 2:i * 2 + 2])[0]
# return registers list
return registers
def read_input_registers(self, reg_addr, reg_nb=1):
"""Modbus function READ_INPUT_REGISTERS (0x04)
:param reg_addr: register address (0 to 65535)
:type reg_addr: int
:param reg_nb: number of registers to read (1 to 125)
:type reg_nb: int
:returns: registers list or None if fail
:rtype: list of int or None
"""
# check params
if not (0 <= int(reg_addr) <= 65535):
self.__debug_msg("read_input_registers() : reg_addr out of range")
return None
if not (1 <= int(reg_nb) <= 125):
self.__debug_msg("read_input_registers() : reg_nb out of range")
return None
if (int(reg_addr) + int(reg_nb)) > 65536:
self.__debug_msg("read_input_registers() : read after ad 65535")
return None
# build frame
tx_buffer = self._mbus_frame(const.READ_INPUT_REGISTERS,
struct.pack(">HH", reg_addr, reg_nb))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check min frame body size
if len(f_body) < 2:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_input_registers(): rx frame under min size")
self.close()
return None
# extract field "byte count"
rx_byte_count = struct.unpack("B", f_body[0:1])[0]
# frame with regs value
f_regs = f_body[1:]
# check rx_byte_count: match nb of bits request and check buffer size
if not ((rx_byte_count == 2 * reg_nb) and
(rx_byte_count == len(f_regs))):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("read_input_registers(): rx byte count mismatch")
self.close()
return None
# allocate a reg_nb size list
registers = [None] * reg_nb
# fill registers list with register items
for i, item in enumerate(registers):
registers[i] = struct.unpack(">H", f_regs[i * 2:i * 2 + 2])[0]
# return registers list
return registers
def write_single_coil(self, bit_addr, bit_value):
"""Modbus function WRITE_SINGLE_COIL (0x05)
:param bit_addr: bit address (0 to 65535)
:type bit_addr: int
:param bit_value: bit value to write
:type bit_value: bool
:returns: True if write ok or None if fail
:rtype: bool or None
"""
# check params
if not (0 <= int(bit_addr) <= 65535):
self.__debug_msg("write_single_coil() : bit_addr out of range")
return None
# build frame
bit_value = 0xFF if bit_value else 0x00
tx_buffer = self._mbus_frame(const.WRITE_SINGLE_COIL,
struct.pack(">HBB",
bit_addr, bit_value, 0))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check fix frame size
if len(f_body) != 4:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("write_single_coil(): rx frame size error")
self.close()
return None
# register extract
(rx_bit_addr, rx_bit_value, rx_padding) = struct.unpack(">HBB",
f_body[:4])
# check bit write
is_ok = (rx_bit_addr == bit_addr) and (rx_bit_value == bit_value)
return True if is_ok else None
def write_single_register(self, reg_addr, reg_value):
"""Modbus function WRITE_SINGLE_REGISTER (0x06)
:param reg_addr: register address (0 to 65535)
:type reg_addr: int
:param reg_value: register value to write
:type reg_value: int
:returns: True if write ok or None if fail
:rtype: bool or None
"""
# check params
if not (0 <= int(reg_addr) <= 65535):
self.__debug_msg("write_single_register() : reg_addr out of range")
return None
if not (0 <= int(reg_value) <= 65535):
self.__debug_msg(
"write_single_register() : reg_value out of range")
return None
# build frame
tx_buffer = self._mbus_frame(const.WRITE_SINGLE_REGISTER,
struct.pack(">HH", reg_addr, reg_value))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check fix frame size
if len(f_body) != 4:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("write_single_register(): rx frame size error")
self.close()
return None
# register extract
rx_reg_addr, rx_reg_value = struct.unpack(">HH", f_body)
# check register write
is_ok = (rx_reg_addr == reg_addr) and (rx_reg_value == reg_value)
return True if is_ok else None
def write_multiple_registers(self, reg_addr, regs_value):
"""Modbus function WRITE_MULTIPLE_REGISTERS (0x10)
:param reg_addr: registers address (0 to 65535)
:type reg_addr: int
:param reg_value: registers value to write
:type reg_value: list
:returns: True if write ok or None if fail
:rtype: bool or None
"""
# number of registers to write
regs_nb = len(regs_value)
# check params
if not (0 <= int(reg_addr) <= 65535):
self.__debug_msg("write_multiple_registers() : " +
"reg_addr out of range")
return None
if not (1 <= int(regs_nb) <= 125):
self.__debug_msg("write_multiple_registers() : " +
"reg_nb out of range")
return None
if (int(reg_addr) + int(regs_nb)) > 65536:
self.__debug_msg("write_multiple_registers() : " +
"write after ad 65535")
return None
# build frame
# format reg value string
regs_val_str = b""
for reg in regs_value:
# check current register value
if not (0 <= int(reg) <= 65535):
self.__debug_msg("write_multiple_registers() : " +
"regs_value out of range")
return None
# pack register for build frame
regs_val_str += struct.pack(">H", reg)
bytes_nb = len(regs_val_str)
# format modbus frame body
body = struct.pack(">HHB", reg_addr, regs_nb, bytes_nb) + regs_val_str
tx_buffer = self._mbus_frame(const.WRITE_MULTIPLE_REGISTERS, body)
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check fix frame size
if len(f_body) != 4:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("write_multiple_registers(): rx frame size error")
self.close()
return None
# register extract
(rx_reg_addr, rx_reg_nb) = struct.unpack(">HH", f_body[:4])
# check regs write
is_ok = (rx_reg_addr == reg_addr)
return True if is_ok else None
def _can_read(self):
"""Wait data available for socket read
:returns: True if data available or None if timeout or socket error
:rtype: bool or None
"""
if self.__sock is None:
return None
if select.select([self.__sock], [], [], self.__timeout)[0]:
return True
else:
self.__last_error = const.MB_TIMEOUT_ERR
self.__debug_msg("timeout error")
self.close()
return None
def _send(self, data):
"""Send data over current socket
:param data: registers value to write
:type data: str (Python2) or class bytes (Python3)
:returns: True if send ok or None if error
:rtype: bool or None
"""
# check link
if self.__sock is None:
self.__debug_msg("call _send on close socket")
return None
# send
data_l = len(data)
try:
send_l = self.__sock.send(data)
except socket.error:
send_l = None
# handle send error
if (send_l is None) or (send_l != data_l):
self.__last_error = const.MB_SEND_ERR
self.__debug_msg("_send error")
self.close()
return None
else:
return send_l
def _recv(self, max_size):
"""Receive data over current socket
:param max_size: number of bytes to receive
:type max_size: int
:returns: receive data or None if error
:rtype: str (Python2) or class bytes (Python3) or None
"""
# wait for read
if not self._can_read():
self.close()
return None
# recv
try:
r_buffer = self.__sock.recv(max_size)
except socket.error:
r_buffer = None
# handle recv error
if not r_buffer:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("_recv error")
self.close()
return None
return r_buffer
def _send_mbus(self, frame):
"""Send modbus frame
:param frame: modbus frame to send (with MBAP for TCP/CRC for RTU)
:type frame: str (Python2) or class bytes (Python3)
:returns: number of bytes send or None if error
:rtype: int or None
"""
# for auto_open mode, check TCP and open if need
if self.__auto_open and not self.is_open():
self.open()
# send request
bytes_send = self._send(frame)
if bytes_send:
if self.__debug:
self._pretty_dump('Tx', frame)
return bytes_send
else:
return None
def _recv_mbus(self):
"""Receive a modbus frame
:returns: modbus frame body or None if error
:rtype: str (Python2) or class bytes (Python3) or None
"""
# receive
# modbus TCP receive
if self.__mode == const.MODBUS_TCP:
# 7 bytes header (mbap)
rx_buffer = self._recv(7)
# check recv
if not (rx_buffer and len(rx_buffer) == 7):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("_recv MBAP error")
self.close()
return None
rx_frame = rx_buffer
# decode header
(rx_hd_tr_id, rx_hd_pr_id,
rx_hd_length, rx_hd_unit_id) = struct.unpack(">HHHB", rx_frame)
# check header
if not ((rx_hd_tr_id == self.__hd_tr_id) and
(rx_hd_pr_id == 0) and
(rx_hd_length < 256) and
(rx_hd_unit_id == self.__unit_id)):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("MBAP format error")
self.close()
return None
# end of frame
rx_buffer = self._recv(rx_hd_length - 1)
if not (rx_buffer and
(len(rx_buffer) == rx_hd_length - 1) and
(len(rx_buffer) >= 2)):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("_recv frame body error")
self.close()
return None
rx_frame += rx_buffer
# dump frame
if self.__debug:
self._pretty_dump('Rx', rx_frame)
# body decode
rx_bd_fc = struct.unpack("B", rx_buffer[0:1])[0]
f_body = rx_buffer[1:]
# modbus RTU receive
elif self.__mode == const.MODBUS_RTU:
# receive modbus RTU frame (max size is 256 bytes)
rx_buffer = self._recv(256)
# on _recv error
if not rx_buffer:
return None
rx_frame = rx_buffer
# dump frame
if self.__debug:
self._pretty_dump('Rx', rx_frame)
# RTU frame min size is 5 bytes
if len(rx_buffer) < 5:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("short frame error")
self.close()
return None
# check CRC
if not self._crc_is_ok(rx_frame):
self.__last_error = const.MB_CRC_ERR
self.__debug_msg("CRC error")
self.close()
return None
# body decode
(rx_unit_id, rx_bd_fc) = struct.unpack("BB", rx_frame[:2])
# check
if not (rx_unit_id == self.__unit_id):
self.__last_error = const.MB_RECV_ERR
self.__debug_msg("unit ID mismatch error")
self.close()
return None
# format f_body: remove unit ID, function code and CRC 2 last bytes
f_body = rx_frame[2:-2]
# for auto_close mode, close socket after each request
if self.__auto_close:
self.close()
# check except
if rx_bd_fc > 0x80:
# except code
exp_code = struct.unpack("B", f_body[0:1])[0]
self.__last_error = const.MB_EXCEPT_ERR
self.__last_except = exp_code
self.__debug_msg("except (code " + str(exp_code) + ")")
return None
else:
# return
return f_body
def _mbus_frame(self, fc, body):
"""Build modbus frame (add MBAP for Modbus/TCP, slave AD + CRC for RTU)
:param fc: modbus function code
:type fc: int
:param body: modbus frame body
:type body: str (Python2) or class bytes (Python3)
:returns: modbus frame
:rtype: str (Python2) or class bytes (Python3)
"""
# build frame body
f_body = struct.pack("B", fc) + body
# modbus/TCP
if self.__mode == const.MODBUS_TCP:
# build frame ModBus Application Protocol header (mbap)
self.__hd_tr_id = random.randint(0, 65535)
tx_hd_pr_id = 0
tx_hd_length = len(f_body) + 1
f_mbap = struct.pack(">HHHB", self.__hd_tr_id, tx_hd_pr_id,
tx_hd_length, self.__unit_id)
return f_mbap + f_body
# modbus RTU
elif self.__mode == const.MODBUS_RTU:
# format [slave addr(unit_id)]frame_body[CRC16]
slave_ad = struct.pack("B", self.__unit_id)
return self._add_crc(slave_ad + f_body)
def _pretty_dump(self, label, data):
"""Print modbus/TCP frame ("[header]body")
or RTU ("body[CRC]") on stdout
:param label: modbus function code
:type label: str
:param data: modbus frame
:type data: str (Python2) or class bytes (Python3)
"""
# split data string items to a list of hex value
dump = ["%02X" % c for c in bytearray(data)]
# format for TCP or RTU
if self.__mode == const.MODBUS_TCP:
if len(dump) > 6:
# "[MBAP] ..."
dump[0] = "[" + dump[0]
dump[6] = dump[6] + "]"
elif self.__mode == const.MODBUS_RTU:
if len(dump) > 4:
# "... [CRC]"
dump[-2] = "[" + dump[-2]
dump[-1] = dump[-1] + "]"
# print result
print(label)
s = ""
for i in dump:
s += i + " "
print(s)
def _crc(self, frame):
"""Compute modbus CRC16 (for RTU mode)
:param label: modbus frame
:type label: str (Python2) or class bytes (Python3)
:returns: CRC16
:rtype: int
"""
crc = 0xFFFF
for index, item in enumerate(bytearray(frame)):
next_byte = item
crc ^= next_byte
for i in range(8):
lsb = crc & 1
crc >>= 1
if lsb:
crc ^= 0xA001
return crc
def _add_crc(self, frame):
"""Add CRC to modbus frame (for RTU mode)
:param label: modbus RTU frame
:type label: str (Python2) or class bytes (Python3)
:returns: modbus RTU frame with CRC
:rtype: str (Python2) or class bytes (Python3)
"""
crc = struct.pack("<H", self._crc(frame))
return frame + crc
def _crc_is_ok(self, frame):
"""Check the CRC of modbus RTU frame
:param label: modbus RTU frame with CRC
:type label: str (Python2) or class bytes (Python3)
:returns: status CRC (True for valid)
:rtype: bool
"""
return (self._crc(frame) == 0)
def __debug_msg(self, msg):
"""Print debug message if debug mode is on
:param msg: debug message
:type msg: str
"""
if self.__debug:
print(msg)
|
|
from flask import Flask, jsonify, render_template, request
from db import readDB, writeDB, updateDB
import random
import time
app = Flask(__name__)
@app.route('/_validateUser')
def getUsers():
email = request.args['email']
password = request.args['password']
users = readDB("data/users.json")["users"]
user_type = {"type":-1}
for user in users:
if user["email"] == email and user["password"] == password:
user_type = {"type":user["type"]}
return jsonify(user_type)
@app.route('/_getStudentDocuments')
def get_StudentDocuments():
documents = readDB('data/student_documents.json')
return jsonify(documents)
@app.route('/_getProfessorDocuments')
def get_ProfessorDocuments():
documents = readDB('data/professor_documents.json')
return jsonify(documents)
@app.route('/_getAssistantDocuments')
def get_AssistantDocuments():
documents = readDB('data/assistant_documents.json')
return jsonify(documents)
@app.route('/_getDirectorDocuments')
def get_DirectorDocuments():
documents = readDB('data/director_documents.json')
return jsonify(documents)
@app.route('/_newAssistantship')
def newAssistantship():
# documents = readDB('data/student_documents.json')
return render_template("new_assistantship")
@app.route('/_getDocument')
def getDocument():
print "Entre a Get Document"
doc_id = request.args["doc_id"]
user_type = request.args["user_type"]
print "doc_id",doc_id
print "user_type",user_type
if int(user_type) == 0:
documents = readDB('data/student_documents.json')["documents"]
if int(user_type) == 1:
documents = readDB('data/professor_documents.json')["documents"]
if int(user_type) == 2:
documents = readDB('data/assistant_documents.json')["documents"]
if int(user_type) == 3:
documents = readDB('data/director_documents.json')["documents"]
print "documents",documents
for document in documents:
if int(document["id"]) == int(doc_id):
doc = document
return jsonify(doc)
@app.route('/_insertAssistantship')
def insertAssistantship():
user_type = request.args['user_type']
advisor = request.args['advisor']
project = request.args['project']
task = request.args['task']
assistantship_type = request.args['a_type']
student = request.args['student']
print "user_type", user_type
print "advisor", advisor
print "project", project
print "task", task
print "assistantship_type", assistantship_type
print "student", student
# student: student
inserted = {'status':-1}
# Setting values for document to be inserted
doc_id = random.randint(1, 999999999)
if assistantship_type == "Research":
name = "2016 Research Assistantship Request"
else:
name = "2016 TA Assistantship Request"
# If document is being sent by student
if int(user_type) == 0:
document = {}
document["id"] = doc_id
document["name"] = name
document["type"] = "Assistantship Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = advisor
document["project"] = project
document["task"] = task
document["student"] = student
document["requester"] = "Student: Jessica Cotrina"
document["sent_status"] = ""
document["last_edited"] = "Student: Jessica Cotrina"
document["student_number"] = "502-15-6168"
print document
writeDB('data/student_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':name}
if int(user_type) == 1:
document = {}
document["id"] = doc_id
document["name"] = name
document["type"] = "Assistantship Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = "Nestor Rodriguez"
document["project"] = project
document["task"] = task
document["student"] = student
document["requester"] = "Professor: Nestor Rodriguez"
document["sent_status"] = ""
document["last_edited"] = "Professor: Nestor Rodriguez"
print document
writeDB('data/professor_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':name}
if int(user_type) == 2:
document = {}
document["id"] = doc_id
document["name"] = name
document["type"] = "Assistantship Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = advisor
document["project"] = project
document["task"] = task
document["student"] = student
document["requester"] = "Assistant: Alida Minguela"
document["sent_status"] = ""
document["last_edited"] = "Assistant: Alida Minguela"
print document
writeDB('data/assistant_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':name}
if int(user_type) == 3:
document = {}
document["id"] = doc_id
document["name"] = name
document["type"] = "Assistantship Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = advisor
document["project"] = project
document["task"] = task
document["student"] = student
document["requester"] = "Director: Jose Colom"
document["sent_status"] = ""
docucment["last_edited"] = "Director Jose Colom"
print document
writeDB('data/director_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':name}
return jsonify(inserted)
@app.route('/_insertAssistantAssistantship')
def insertAssistantAssistantship():
user_type = request.args['user_type']
advisor = request.args['advisor']
project = request.args['project']
task = request.args['task']
assistantship_type = request.args['a_type']
student = request.args['student']
student_number = request.args['student_number']
department = request.args['department']
major = request.args['major']
print "user_type", user_type
print "advisor", advisor
print "project", project
print "task", task
print "assistantship_type", assistantship_type
print "student", student
print "student_number", student_number
print "department", department
print "major", major
# student: student
inserted = {'status':-1}
# Setting values for document to be inserted
doc_id = random.randint(1, 999999999)
if assistantship_type == "Research":
name = "2016 Research Assistantship Request"
else:
name = "2016 TA Assistantship Request"
# If document is being sent by student
document = {}
document["id"] = doc_id
document["name"] = name
document["type"] = "Assistantship Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = department
document["major"] = major
document["progress"] = 0
document["advisor"] = advisor
document["project"] = project
document["task"] = task
document["student"] = student
document["requester"] = "Assistant: Alida Minguela"
document["sent_status"] = ""
document["last_edited"] = "Assistant: Alida Minguela"
document["student number"] = student_number
print document
writeDB('data/assistant_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':name}
return jsonify(inserted)
@app.route('/_insertTravelRequest')
def insertTravelRequest():
user_type = request.args['user_type']
conference_name = request.args['conference_name']
travel_location = request.args['travel_location']
departure_date = request.args['departure_date']
return_date = request.args['return_date']
advisor = request.args['advisor']
purpose= request.args['purpose']
requester = request.args['requester']
print "user_type",user_type
inserted = {'status':-1}
# Setting values for document to be inserted
doc_id = random.randint(1, 999999999)
# If document is being sent by student
if int(user_type) == 0:
document = {}
document["id"] = doc_id
document["name"] = conference_name + " Travel Request"
document["type"] = "Travel Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = advisor
document["travel_location"] = travel_location
document["departure_date"] = departure_date
document["return_date"] = return_date
document["purpose"] = purpose
document["requester"] = "Student: Jessica Cotrina"
document["sent_status"] = ""
document["last_edited"] = "Student: Jessica Cotrina"
print document
writeDB('data/student_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':conference_name}
# If document is being sent by professor
if int(user_type) == 1:
document = {}
document["id"] = doc_id
document["name"] = conference_name + " Travel Request"
document["type"] = "Travel Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = advisor
document["travel_location"] = travel_location
document["departure_date"] = departure_date
document["return_date"] = return_date
document["purpose"] = purpose
document["requester"] = "Professor: Nestor Rodriguez"
document["sent_status"] = ""
document["last_edited"] = "Professor: Nestor Rodriguez"
print document
writeDB('data/professor_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':conference_name}
if int(user_type) == 2:
document = {}
document["id"] = doc_id
document["name"] = conference_name + " Travel Request"
document["type"] = "Travel Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = "Assistant: Alida Minguela"
document["travel_location"] = travel_location
document["departure_date"] = departure_date
document["return_date"] = return_date
document["purpose"] = purpose
document["requester"] = advisor
document["sent_status"] = ""
document["last_edited"] = "Assistant: Alida Minguela"
print document
writeDB('data/assistant_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':conference_name}
if int(user_type) == 3:
document = {}
document["id"] = doc_id
document["name"] = conference_name + " Travel Request"
document["type"] = "Travel Request"
document["date"] = (time.strftime("%d/%m/%Y %H:%M:%S"))
document["status"] = "Created"
document["faculty"] = "Electrical Engineering"
document["major"] = "computer Engineering"
document["progress"] = 0
document["advisor"] = advisor
document["travel_location"] = travel_location
document["departure_date"] = departure_date
document["return_date"] = return_date
document["purpose"] = purpose
document["requester"] = "Director: Jose Colom"
document["sent_status"] = ""
document["last_edited"] = "Director: Jose Colom"
print document
writeDB('data/director_documents.json',document)
writeDB('data/document_history.json',document)
inserted = {'status':0,'id':doc_id,'name':conference_name}
return jsonify(inserted)
@app.route('/_saveAssistantship')
def saveAssistantship():
user_type = request.args['user_type']
doc_id = request.args['doc_id']
project = request.args['project']
advisor = request.args['advisor']
student = request.args['student']
task = request.args['task']
saved = {'status':-1}
if int(user_type) == 0:
documents = readDB('data/student_documents.json')["documents"]
for document in documents:
if int(document["id"]) == int(doc_id):
doc = document
doc["project"] = project
doc["advisor"] = advisor
doc["student"] = student
doc["task"] = task
doc["last_edited"] = "Student: Jessica Cotrina"
print "doc", doc
updateDB('data/student_documents.json',doc)
writeDB('data/document_history.json',doc)
saved = {'status':0,'doc_id':doc_id}
if int(user_type) == 1:
documents = readDB('data/professor_documents.json')["documents"]
for document in documents:
if int(document["id"]) == int(doc_id):
doc = document
doc["project"] = project
doc["advisor"] = advisor
doc["student"] = student
doc["task"] = task
doc["last_edited"] = "Professor: Nestor Rodriguez"
updateDB('data/profssor_documents.json',doc)
writeDB('data/document_history.json',doc)
saved = {'status':0,'doc_id':doc_id}
if int(user_type) == 2:
documents = readDB('data/assistant_documents.json')["documents"]
for document in documents:
if int(document["id"]) == int(doc_id):
doc = document
doc["project"] = project
doc["advisor"] = advisor
doc["student"] = student
doc["task"] = task
doc["last_edited"] = "Assistant: Alida Minguela"
updateDB('data/assistant_documents.json',doc)
writeDB('data/document_history.json',doc)
saved = {'status':0,'doc_id':doc_id}
if int(user_type) == 3:
documents = readDB('data/director_documents.json')["documents"]
for document in documents:
if int(document["id"]) == int(doc_id):
doc = document
doc["project"] = project
doc["advisor"] = advisor
doc["student"] = student
doc["task"] = task
doc["last_edited"] = "Director: Jessica Cotrina"
updateDB('data/director_documents.json',doc)
writeDB('data/document_history.json',doc)
saved = {'status':0,'doc_id':doc_id}
return jsonify(saved)
@app.route('/_getStudentAssistantships')
def get_StudentAssistantships():
print "Entre"
documents = readDB('data/student_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Assistantship Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getProfessorAssistantships')
def get_ProfessorAssistantships():
print "Entre"
documents = readDB('data/professor_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Assistantship Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getStudentTravelRequests')
def get_StudentTravelRequests():
print "Entre"
documents = readDB('data/student_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Travel Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getProfessorTravelRequests')
def get_ProfessorTravelRequests():
print "Entre"
documents = readDB('data/professor_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Travel Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getAssistantAssistantships')
def get_AssistantAssistantships():
print "Entre"
documents = readDB('data/assistant_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Assistantship Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getDirectorAssistantships')
def get_DirectorAssistantships():
print "Entre"
documents = readDB('data/director_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Assistantship Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getAssistantTravelRequests')
def get_AssistantTravelRequests():
print "Entre"
documents = readDB('data/assistant_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Travel Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route('/_getDirectorTravelRequests')
def get_DirectorTravelRequests():
print "Entre"
documents = readDB('data/director_documents.json')["documents"]
new_documents = {"documents":[]}
# print documents
for document in documents:
print document
if document["type"] == "Travel Request":
new_documents["documents"].append(document)
# print "\n",new_documents
return jsonify(new_documents)
@app.route("/")
def init():
return render_template("login.html")
@app.route("/student")
def getStudents():
return render_template("student.html")
@app.route("/professor")
def getProfessors():
return render_template("professor.html")
@app.route("/assistant")
def getAssistant():
return render_template("assistant.html")
@app.route("/director")
def getDirector():
return render_template("director.html")
@app.route('/_sendDocument')
def sendAndSaveDocument():
print "Entre"
doc_id = request.args['doc_id']
user_type = request.args['user_type']
sent_to = request.args['sent_to']
message = request.args['message']
action = request.args['action'] if request.args['action'] else "Sent"
if action == "Authorize":
action_title = "Waiting for authorization"
if action == "Sign":
action_title = "Waiting for signature"
if action == "Verify":
action_title = "Waiting for verification"
if action == "Endose":
action_title = "Waiting for endorsement"
if action == "Sent":
action_title = "Sent"
# status = request.args['status'] ? request.args['status'] else "Sent"
sent = {'status':-1}
print doc_id, user_type, sent_to, sent, action
# If document is being sent by student
if int(user_type) == 0:
print "Soy estudiante"
documents = readDB('data/student_documents.json')["documents"]
print "documents",documents
# Getting documment
for doc in documents:
# print "Entre al doc"
print doc["id"], doc_id
if int(doc["id"]) == int(doc_id):
document = doc
# elif doc["name"] == doc_name + " Travel Request":
# document = doc
document["status"] = action_title
document["sent_status"] = "sent"
document["last_edited"] = "Student: Jessica Cotrina"
document["message"] = message
document["action"] = action
document["sent_to"] = sent_to
# document[""]
updateDB('data/student_documents.json',document)
# If document is being sent by professor
if int(user_type) == 1:
print "Soy profesor"
documents = readDB('data/professor_documents.json')["documents"]
# Getting documment
for doc in documents:
# print "Entre al doc"
print doc["id"], doc_id
if int(doc["id"]) == int(doc_id):
# print "True"
document = doc
# elif doc["name"] == doc_name + " Travel Request":
# document = doc
document["status"] = action
document["sent_status"] = "sent"
document["last_edited"] = "Professor: Nestor Rodriguez"
document["message"] = message
document["action"] = action
document["sent_to"] = sent_to
updateDB('data/professor_documents.json',document)
# If document is being sent by assistant
if int(user_type) == 2:
print "Soy Asistente"
documents = readDB('data/assistant_documents.json')["documents"]
# Getting documment
for doc in documents:
# print "Entre al doc"
print doc["id"], doc_id
if int(doc["id"]) == int(doc_id):
# print "True"
document = doc
# elif doc["name"] == doc_name + " Travel Request":
# document = doc
document["status"] = action
document["sent_status"] = "sent"
document["last_edited"] = "Assistant: Alida Minguela"
document["message"] = message
document["action"] = action
document["sent_to"] = sent_to
updateDB('data/assistant_documents.json',document)
# If document is being sent by director
if int(user_type) == 3:
print "Soy director"
documents = readDB('data/director_documents.json')["documents"]
# Getting documment
for doc in documents:
# print "Entre al doc"
print doc["id"], doc_id
if int(doc["id"]) == int(doc_id):
# print "True"
document = doc
# elif doc["name"] == doc_name + " Travel Request":
# document = doc
document["status"] = action
document["sent_status"] = "sent"
document["last_edited"] = "Director: Jose Colom"
document["message"] = message
document["action"] = action
document["sent_to"] = sent_to
updateDB('data/director_documents.json',document)
# If document being sent to professor
if sent_to == "nestor.rodriguez@upr.edu":
document["sent_status"] = "received"
documents = readDB('data/professor_documents.json')["documents"]
print "Professor Documents",documents
document_exists = False
for d in documents:
if int(d["id"]) == int(doc_id):
document_exists = True
print "Document Exists? ", document_exists
if document_exists == True:
updateDB('data/professor_documents.json',document)
else:
writeDB('data/professor_documents.json',document)
writeDB('data/document_history.json',document)
sent = {'status':0}
# If document being set to assistant
if sent_to == "alida.minguela@upr.edu":
document["sent_status"] = "received"
documents = readDB('data/assistant_documents.json')["documents"]
document_exists = False
for d in documents:
if int(d["id"]) == int(doc_id):
document_exists = True
print "Document Exists? ", document_exists
if document_exists == True:
updateDB('data/assistant_documents.json',document)
else:
writeDB('data/assistant_documents.json',document)
writeDB('data/document_history.json',document)
sent = {'status':0}
# If document being sent to director
if sent_to == "jose.colom@upr.edu":
document["sent_status"] = "received"
documents = readDB('data/director_documents.json')["documents"]
document_exists = False
for d in documents:
if int(d["id"]) == int(doc_id):
document_exists = True
print "Document Exists? ", document_exists
if document_exists == True:
updateDB('data/director_documents.json',document)
else:
writeDB('data/director_documents.json',document)
writeDB('data/document_history.json',document)
sent = {'status':0}
# If document being sent to student
if sent_to == "jessica.cotrina@upr.edu":
document["sent_status"] = "received"
documents = readDB('data/student_documents.json')["documents"]
document_exists = False
for d in documents:
if int(d["id"]) == int(doc_id):
document_exists = True
print "Document Exists? ", document_exists
if document_exists == True:
updateDB('data/student_documents.json',document)
else:
writeDB('data/student_documents.json',document)
writeDB('data/document_history.json',document)
sent = {'status':0}
return jsonify(sent)
@app.route("/doc_info",methods=['GET'])
def getDocumentInfo():
doc_id = request.args['id']
documents = readDB('data/document_history.json')['documents']
for document in documents:
if int(document["id"]) == int(doc_id):
doc = document
return render_template("doc_info.html",document_id=doc["id"])
@app.route("/_getDocumentHistory",methods=['GET'])
def getInfo():
document_id = request.args['document_id']
documents = readDB('data/document_history.json')['documents']
document_history = {"history":[]}
for document in documents:
if int(document["id"]) == int(document_id):
document_history["history"].append(document)
return jsonify(document_history)
if __name__ == "__main__":
app.run()
|
|
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
import argparse
import glob
import os
import tempfile
import subprocess
import sys
from datetime import datetime
""" This script takes in 2 fasta files and trimms the sequences in 1 to that of the reference.
I am using MUSCLE to align the sequences, and am drawing heavily (in many cases verbatim) from the HA_number script
deleveped by Jesse Bloom at https://github.com/jbloomlab/HA_numbering/blob/master/HA_numbering.py
"""
def Align(headers_seqs, progpath, musclegapopen=None):
"""Performs a multiple sequence alignment of two or more sequences.
By default, the protein sequences are aligned using PROBCONS. This is
probably the most accurate alignment program. However, it is
slow and consumes large amounts of memory if you are aligning
a very large number of sequences (typically if you are aligning
more than several hundred). In that case, you may prefer to use
MUSCLE instead. You can choose between the two with the 'program'
option. If you decide to use MUSCLE, you can also align nucleotide
sequences with this program.
'headers_seqs' is a list of seq_record objects. The list must contain 2 objects and the reference is [first,second].
'progpath' should specify a directory containing the alignment program executable,
either PROBCONS or MUSCLE. The PROBCONS executable is assumed to have
the name "probcons" in this directory. The MUSCLE executable is assumed to
have the name "muscle" in this directory.
'program' specifies what program to use for the alignment. By default, it is
"PROBCONS". If you wish to use MUSCLE instead, set it to "MUSCLE".
'musclegapopen' sets the MUSCLE gap openining penalty to the specified
value. By default it is None, meaning we use the MUSCLE default penalty.
You can also set it to a number; for example -100 will lead to fewer gaps.
This executable is used to perform a multiple sequence alignment of the proteins
with the default settings of either PROBCONS or MUSCLE. The returned variable is a
new list 'aligned_headers_seqs'. Each entry is a 2-tuple '(head, aligned_seq)'.
'head' has the same meaning as on input (the sequence header) and
'aligned_seq' is the aligned sequence, with gaps inserted as '-'
as appropriate. Therefore, all of the 'aligned_seq' entries in
'aligned_headers_seqs' are of the same length. The entries in 'aligned_headers_seq'
are in the same order as in the input list 'headers_seqs'.
"""
if not (isinstance(headers_seqs, list) and len(headers_seqs) >= 2):
raise ValueError, 'header_seqs does not specify a list with at least two entries.'
if not os.path.isdir(progpath):
raise ValueError, "Cannot find directory %s." % progpath
exe = os.path.abspath("%s/muscle" % progpath) # the executable
if not os.path.isfile(exe):
raise IOError, "Cannot find executable at %s." % exe
currdir = os.getcwd()
tempdir = tempfile.mkdtemp()
try:
# do stuff in a temporary directory
infile = "%s/in.fasta" % tempdir # input file
outfile = "%s/out.fasta" % tempdir # output file
SeqIO.write(headers_seqs, infile, "fasta") # write sequences to the input file
if musclegapopen != None:
p = subprocess.Popen("%s -gapopen %d -in %s -out %s" % (exe, musclegapopen, infile, outfile), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE) # run MUSCLE
else:
p = subprocess.Popen("%s -in %s -out %s" % (exe, infile, outfile), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE) # run MUSCLE
(output, errors) = p.communicate()
try:
aligned_headers_seqs = ReadFASTA(outfile)
except:
sys.stderr.write("Error getting alignment output, error of %s" % errors)
raise
finally:
os.chdir(currdir) # return to the original directory
for file in os.listdir(tempdir):
os.remove("%s/%s" % (tempdir, file)) # remove files from temporary directory
os.rmdir(tempdir) # remove temporary directory
if len(aligned_headers_seqs) != len(headers_seqs):
raise ValueError, "Did not return the correct number of aligned sequences."
# put the aligned sequences in the same order as the input sequences
# n = len(aligned_headers_seqs[0][1]) # length of aligned sequences
# d = dict(aligned_headers_seqs)
# aligned_headers_seqs = []
# for (head, seq) in headers_seqs:
# try:
# alignedseq = d[head]
# except KeyError:
# raise ValueError("After alignment, the following header is missing: %s" % head)
# if len(alignedseq) != n:
# open('errors.temp', 'w').write(errors)
# raise ValueError("Aligned sequence %s is not of length %d: if you are using MUSCLE, you may be running out of memory. Errors have been written to errors.temp." % (alignedseq, n))
# if len(seq) > n:
# open('errors.temp', 'w').write(errors)
# raise ValueError("Unaligned seq %s is longer than aligned length of %d: if you are using MUSCLE, you many be running out of memory. Errors have been written to errors.temp." % (seq, n))
# aligned_headers_seqs.append((head, alignedseq))
#print(aligned_headers_seqs)
return aligned_headers_seqs # return the aligned sequences
def get_regions(ref_seq):
"""
ref_seq is a string containing gaps '-'
This function returns a list of lists (legnth two)
with the start and stop (python) of the non gapped regions.
"""
gap=True
gene = []
region = []
for i in range(0,len(ref_seq)):
if ref_seq[i]!='-' and gap==True:
region.append(i)
if i ==len(ref_seq)-1: # There is a trailing base at the end
region.append(i+1)
gene.append(region)
gap=False
elif ref_seq[i]=='-' and gap ==False:
region.append(i)
gene.append(region)
region=[]
gap=True
#print "setting gap to true"
elif ref_seq[i]!='-' and gap==False and i ==len(ref_seq)-1: # The last character is not a gap and should be included in this frame
region.append(i+1)
gene.append(region)
return gene
def trim_to_regions(sequence,regions):
trimmed_sequence = ""
for segment in regions:
trimmed_sequence=trimmed_sequence+sequence[segment[0]:segment[1]]
return(trimmed_sequence)
def trim_sequences(aligned_header_seqs):
"""
The first sequence in this alignment is taken to correspond to the reference sequence.
The returned variable is a list similar to aligned_headers_seqs, but with
all positions corresponding to gaps in this reference sequence stripped away.
In the sample sequence, every character at
the same position as a gap in the reference sequence is removed. The headers are
unchanged. The order of sequences in this stripped alignment is also
unchanged.
"""
if not (isinstance(aligned_header_seqs, list) and len(aligned_header_seqs) >= 2):
raise ValueError, "Input does not specify at least two aligned sequences."
ref_seq = aligned_header_seqs[0]# str yields the sequence the reference here is shorter so we count the '-' in this sequence.
regions= get_regions(ref_seq)
samp_seq=aligned_header_seqs[1]
sample_sequence = trim_to_regions(samp_seq,regions)
return([sample_sequence,regions])
def ReadFASTA(fastafile):
"""Reads sequences from a FASTA file.
'fastafile' should specify the name of a FASTA file.
This function reads all sequences from the FASTA file. It returns the
list 'headers_seqs'. This list is composed of a seq_record objects.
"""
seqs =[]
header = None
for seq_record in SeqIO.parse(fastafile, "fasta"):
seq_record.seq.alphabet=IUPAC.unambiguous_dna
seqs.append(seq_record)
return seqs
def main(): # The positions will be given as base 0 and adjusted to match the convention (base 1) in the funciton
"""Main body of script."""
print "\nBeginning execution trimming script."
parser = argparse.ArgumentParser(description='This script takes in a fasta test fasta file and trims it to match the regions found a reference fasta file.\n I am using it to trim whole genomes to just the coding regions, but I suppose it could have other uses. Currently it relies on MUSCLE. The segment names in the sample file must match those in the reference.')
parser.add_argument('aligner_path', metavar='aligner_path', nargs='+',
help='The path to the muscle executable - assuming the executable is name muscle')
parser.add_argument('in_fa', metavar='in_fa', nargs='+',
help='The input (sample) fa')
parser.add_argument('ref_fa', metavar='ref', nargs='+',
help='The reference fasta to which the sequences will be trimmed.')
parser.add_argument('-out_fa',action='store',dest='out_fa',default=None,
help='optional output the trimmed fasta file')
parser.add_argument('-tsv',action='store',dest='tsv',default=None,
help='optional output - a tsv file recording the number of bp trimmed off the 5\' and 3\' ends')
args = parser.parse_args()
tsv=args.tsv
# parse arguments
args = parser.parse_args()
alignerpath = args.aligner_path[0]
if not os.path.isdir(alignerpath):
raise IOError,"The directory of %s specified by musclepath does not exist." % (alignerpath)
prog = 'MUSCLE'
sample=ReadFASTA(args.in_fa[0])
ref=ReadFASTA(args.ref_fa[0])
samp_seqname=[]
ref_seqname=[]
for seq in sample:
samp_seqname.append(seq.id)
for seq in ref:
ref_seqname.append(seq.id)
# make alignments
print("Making %s alignments..." % prog)
align_ref = []
align_samp=[]
for seqname in samp_seqname:
#print("Aligning %s" % seqname)
sample_seq=sample[samp_seqname.index(seqname)]
try:
ref_seq=ref[ref_seqname.index(seqname)]
except ValueError:
raise ValueError, " Segement %s was not found in the reference sequence" % seqname
alignments=Align([ref_seq, sample_seq], alignerpath)
align_ref.append(alignments[0])
align_samp.append(alignments[1])
print("Trimming...\n")
trimmed=[]
segs=[]
regions = []
for i in range(0,len(align_samp)):
print "Trimming %s" % align_samp[i].id
trimmed_out=trim_sequences([align_ref[i].seq,align_samp[i].seq])
record = SeqRecord(trimmed_out[0],id = align_samp[i].id, description = "made on %s" % str(datetime.now()))
trimmed.append(record)
# make seqRecord object here.
segs.append(align_samp[i].id)
regions.append(trimmed_out[1])
if(tsv==None):
print "writing output to %s" % args.out_fa
SeqIO.write(trimmed, args.out_fa, "fasta")
else:
print "writing tsv file to %s" % tsv
with open(tsv,'w') as out_file:
out_file.write("chr\tcoding\n")
for i in range(0,len(regions)) :
out_file.write(str(segs[i])+"\t"+ str(regions[i])+ '\n')
if __name__ == '__main__':
main()
|
|
# Purpose - ItemsetData is a class to generate transaction data with
# seeded events in order to test mining algorithms
import random
import sys
from collections import defaultdict, namedtuple
import matplotlib.pyplot as plt
import numpy as np
class ItemsetData:
# Initiates the ItemsetData object
# Inputs
# minRealTemplates = The lower bound for the template ID range used to create true sequences
# maxRealTemplates = The upper bound for the template ID range used to create true sequences
# minfillerTemplates = The lower bound for the template ID range used as filler in transactions
# maxfillerTemplates = The upper bound for the template ID range used as
# filler in transactions
def __init__(
self,
minrealTemplates=1,
maxrealTemplates=200,
minfillerTemplates=300,
maxfillerTemplates=500):
# Templates id ranges in true planned patterns and filler patterns
self.minrealTemplates = minrealTemplates
self.maxrealTemplates = maxrealTemplates
self.minfillerTemplates = minfillerTemplates
self.maxfillerTemplates = maxfillerTemplates
self.realTemplates = range(minrealTemplates, maxrealTemplates)
self.fillerTemplates = range(minfillerTemplates, maxfillerTemplates)
# Lists to store real events and the generated transactions
self.realEventsList = []
self.transactionsList = []
#self.transactionsWindows = []
# Variables for tracking usage statistics in object
self.counter = defaultdict(int)
self.wasItUsed = defaultdict(int)
self.fillerTemplatesLimit = defaultdict(int)
# Create list of defined event sequences
# Input
# numRealPatterns = The true number of unique sequences that will appear in the transactions
# minRealEventLength = The minimum length for the unique sequences
# maxRealEventLength = The maximum length for the unique sequences
# Output
# Returns a list containing list of templates that can be used as frequent
# itemsets
def createRealEvents(
self,
numRealPatterns=20,
minRealEventLength=2,
maxRealEventLength=15):
realEvent = []
lenRealPatterns = []
done = False
# Create array of semi-random event lengths
for x in range(numRealPatterns):
lenRealPatterns.append(
random.randint(
minRealEventLength,
maxRealEventLength))
# Build real events
for i in range(numRealPatterns):
realEvent = []
for j in range(lenRealPatterns[i]):
done = False
while (not done):
tempChoice = random.choice(self.realTemplates)
if tempChoice not in realEvent:
realEvent.append(tempChoice)
done = True
self.realEventsList.append(realEvent)
return self.realEventsList
# Insert high frequency templates IDs into existing transactionsList
# Inputs
# value = The template ID of the high frequency item
# modChance = The modular probability of inserting the high frequency item
# (ex: 20=5%, 10=10%, 5=20%)
def addFreq(self, value=-1, modChance=10):
# Occasionally add in high frequency template value of -1
for row in self.transactionsList:
if random.randint(0, 100) % modChance == 0:
row.append(value)
self.wasItUsed[value] += 1
# Create transactions of template IDs
# Inputs
# numTransactions = Number of transactions to generate
# transactionLength = The upper bound for the length of any given transaction
# probabilityEvent = The probability that an event is inserted into a transaction
# paddingLimit = The maximum number of filler templates to pad when inserting a seeded event in a transaction
# Outputs
# Returns a list of transactions or a list of namedtuples
def createTransactions(
self,
numTransactions=1000,
transactionLength=20,
probabilityEvent=.5,
paddingLimit=2):
# Create randomized usage limits for filler templates
for i in self.fillerTemplates:
self.fillerTemplatesLimit[i] = random.randint(
int(numTransactions) / 1000, int(numTransactions / 50))
probabilityEvent = int((1 - probabilityEvent) * 100)
for i in range(numTransactions):
if i != 0 and i % 10000 == 0:
sys.stderr.write(
"\nJust passed " +
str(i) +
" transactions...")
transactionList = []
# If true, add in a real event and pad with a semi-random amount of
# filler
if(random.randint(0, 100) > probabilityEvent):
real = list(random.choice(self.realEventsList))
for j in range(len(real)):
isItStuck = 0
for k in range(random.randint(0, paddingLimit)):
ok = False
while(not ok):
temp = int(
random.gauss(
self.maxfillerTemplates / 2,
self.minfillerTemplates))
if temp > self.minfillerTemplates and temp < self.maxfillerTemplates and self.wasItUsed[
temp] < self.fillerTemplatesLimit[temp]:
self.wasItUsed[temp] += 1
transactionList.append(temp)
ok = True
else:
isItStuck += 1
if isItStuck > 1000:
ok = True
self.wasItUsed[real[j]] += 1
transactionList.append(real[j])
for k in range(random.randint(0, paddingLimit)):
ok = False
while(not ok):
temp = int(
random.gauss(
self.maxfillerTemplates / 2,
self.minfillerTemplates))
if temp > self.minfillerTemplates and temp < self.maxfillerTemplates and self.wasItUsed[
temp] < self.fillerTemplatesLimit[temp]:
self.wasItUsed[temp] += 1
transactionList.append(temp)
ok = True
else:
isItStuck += 1
if isItStuck > 1000:
ok = True
self.counter[frozenset(real)] += 1
# If false, put in only filler templates
else:
useLength = random.randint(3, transactionLength)
for k in range(useLength):
ok = False
isItStuck = 0
while(not ok):
temp = int(
random.gauss(
self.maxfillerTemplates / 2,
self.minfillerTemplates))
if temp > self.minfillerTemplates and temp < self.maxfillerTemplates and self.wasItUsed[
temp] < self.fillerTemplatesLimit[temp]:
self.wasItUsed[temp] += 1
transactionList.append(temp)
ok = True
else:
isItStuck += 1
if isItStuck > 1000:
ok = True
self.transactionsList.append(transactionList)
return self.transactionsList
# Write transactions to file
# Inputs
# outFile = File name to write transactions
def writeTransactions(self, outFile):
outputFile = open(outFile, 'w')
for line in self.transactionsList:
outputFile.write(''.join([str(i) + ' ' for i in line]) + "\n")
outputFile.close()
# Write Metrics/Stats File
# Inputs
# outFile = File name to write transactions
def writeStandardStats(self, outFile):
outputFile = open(outFile, 'w')
# Output the real events that were inserted in transactions
outputFile.write("------------" +
str(len(self.realEventsList)) +
" Real Events (Pattern->Count)----------\n")
for entity in self.counter:
outputFile.write(''.join(
[str(i) + ' ' for i in entity]) + "->" + str(self.counter[entity]) + "\n")
# Output template usage stats
outputFile.write(
"\n\n-----------Template Stats (Temple->Count if Count > 500)------------\n")
for template in sorted(
self.wasItUsed,
key=self.wasItUsed.get,
reverse=True):
if self.wasItUsed[template] > 500:
outputFile.write(str(template) + "->" +
str(self.wasItUsed[template]) + "\n")
outputFile.close()
# Plot bar chart of overall template usage
def plotTemplateUsage(self, title):
index = np.arange(len(self.wasItUsed))
fig, ax = plt.subplots()
bar_width = 1
opacity = 0.4
templateIDs = []
usageStats = []
newLabels = []
for x in sorted(self.wasItUsed.items()):
if x[0] % 50 != 0:
templateIDs.append('')
else:
templateIDs.append(x[0])
usageStats.append(x[1])
templatesPlot = plt.bar(index, usageStats, bar_width,
alpha=opacity,
color='b')
plt.xlabel('Template ID')
plt.ylabel('Times Used')
plt.title(title + ' - Templates Used in Transactions')
plt.xticks(index, templateIDs, rotation="vertical")
plt.show()
|
|
import os
import multiprocessing
import logging
import marshal
import cPickle
import shutil
import struct
import urllib
import msgpack
from dpark.env import env
from dpark.tracker import GetValueMessage, AddItemMessage, RemoveItemMessage
logger = logging.getLogger("cache")
class Cache:
data = {}
def get(self, key):
return self.data.get(key)
def put(self, key, value, is_iterator=False):
if value is not None:
if is_iterator:
value = list(value)
self.data[key] = value
return value
else:
self.data.pop(key, None)
def clear(self):
self.data.clear()
class DiskCache(Cache):
def __init__(self, tracker, path):
if not os.path.exists(path):
try: os.makedirs(path)
except: pass
self.tracker = tracker
self.root = path
def get_path(self, key):
return os.path.join(self.root, '%s_%s' % key)
def get(self, key):
p = self.get_path(key)
if os.path.exists(p):
return self.load(open(p, 'rb'))
# load from other node
if not env.get('SERVER_URI'):
return
rdd_id, index = key
locs = self.tracker.getCacheUri(rdd_id, index)
if not locs:
return
serve_uri = locs[-1]
uri = '%s/cache/%s' % (serve_uri, os.path.basename(p))
f = urllib.urlopen(uri)
if f.code == 404:
logger.warning('load from cache %s failed', uri)
self.tracker.removeHost(rdd_id, index, serve_uri)
f.close()
return
return self.load(f)
def put(self, key, value, is_iterator=False):
p = self.get_path(key)
if value is not None:
return self.save(self.get_path(key), value)
else:
os.remove(p)
def clear(self):
try:
shutil.rmtree(self.root)
except OSError, e:
pass
def load(self, f):
count, = struct.unpack("I", f.read(4))
if not count: return
unpacker = msgpack.Unpacker(f, use_list=False)
for i in xrange(count):
_type, data = unpacker.next()
if _type == 0:
yield marshal.loads(data)
else:
yield cPickle.loads(data)
f.close()
def save(self, path, items):
# TODO: purge old cache
tp = "%s.%d" % (path, os.getpid())
with open(tp, 'wb') as f:
c = 0
f.write(struct.pack("I", c))
try_marshal = True
for v in items:
if try_marshal:
try:
r = 0, marshal.dumps(v)
except Exception:
r = 1, cPickle.dumps(v, -1)
try_marshal = False
else:
r = 1, cPickle.dumps(v, -1)
f.write(msgpack.packb(r))
c += 1
yield v
bytes = f.tell()
if bytes > 10<<20:
logger.warning("cached result is %dMB (larger than 10MB)", bytes>>20)
# count
f.seek(0)
f.write(struct.pack("I", c))
os.rename(tp, path)
class BaseCacheTracker(object):
cache = None
def registerRDD(self, rddId, numPartitions):
pass
def getLocationsSnapshot(self):
pass
def getCachedLocs(self, rdd_id, index):
pass
def getCacheUri(self, rdd_id, index):
pass
def addHost(self, rdd_id, index, host):
pass
def removeHost(self, rdd_id, index, host):
pass
def clear(self):
self.cache.clear()
def getOrCompute(self, rdd, split):
key = (rdd.id, split.index)
cachedVal = self.cache.get(key)
if cachedVal is not None:
logger.debug("Found partition in cache! %s", key)
for i in cachedVal:
yield i
else:
logger.debug("partition not in cache, %s", key)
for i in self.cache.put(key, rdd.compute(split), is_iterator=True):
yield i
serve_uri = env.get('SERVER_URI')
if serve_uri:
self.addHost(rdd.id, split.index, serve_uri)
def stop(self):
self.clear()
class LocalCacheTracker(BaseCacheTracker):
def __init__(self):
self.locs = {}
self.cache = Cache()
def registerRDD(self, rddId, numPartitions):
if rddId not in self.locs:
logger.debug("Registering RDD ID %d with cache", rddId)
self.locs[rddId] = [[] for i in range(numPartitions)]
def getLocationsSnapshot(self):
return self.locs
def getCachedLocs(self, rdd_id, index):
def parse_hostname(uri):
if uri.startswith('http://'):
h = uri.split(':')[1].rsplit('/', 1)[-1]
return h
return ''
return map(parse_hostname, self.getCacheUri(rdd_id, index))
def getCacheUri(self, rdd_id, index):
return self.locs[rdd_id][index]
def addHost(self, rdd_id, index, host):
self.locs[rdd_id][index].append(host)
def removeHost(self, rdd_id, index, host):
if host in self.locs[rdd_id][index]:
self.locs[rdd_id][index].remove(host)
class CacheTracker(BaseCacheTracker):
def __init__(self):
cachedir = os.path.join(env.get('WORKDIR')[0], 'cache')
self.cache = DiskCache(self, cachedir)
self.client = env.trackerClient
if env.isMaster:
self.locs = env.trackerServer.locs
self.rdds = {}
def registerRDD(self, rddId, numPartitions):
self.rdds[rddId] = numPartitions
def getLocationsSnapshot(self):
result = {}
for rdd_id, partitions in self.rdds.items():
result[rdd_id] = [self.locs.get('cache:%s-%s' % (rdd_id, index), [])
for index in xrange(partitions)]
return result
def getCachedLocs(self, rdd_id, index):
def parse_hostname(uri):
if uri.startswith('http://'):
h = uri.split(':')[1].rsplit('/', 1)[-1]
return h
return ''
return map(parse_hostname, self.locs.get('cache:%s-%s' % (rdd_id, index), []))
def getCacheUri(self, rdd_id, index):
return self.client.call(GetValueMessage('cache:%s-%s' % (rdd_id, index)))
def addHost(self, rdd_id, index, host):
return self.client.call(AddItemMessage('cache:%s-%s' % (rdd_id, index), host))
def removeHost(self, rdd_id, index, host):
return self.client.call(RemoveItemMessage('cache:%s-%s' % (rdd_id, index), host))
def getOrCompute(self, rdd, split):
key = (rdd.id, split.index)
cachedVal = self.cache.get(key)
if cachedVal is not None:
logger.debug("Found partition in cache! %s", key)
for i in cachedVal:
yield i
else:
logger.debug("partition not in cache, %s", key)
for i in self.cache.put(key, rdd.compute(split), is_iterator=True):
yield i
serve_uri = env.get('SERVER_URI')
if serve_uri:
self.addHost(rdd.id, split.index, serve_uri)
def __getstate__(self):
raise Exception("!!!")
def test():
logging.basicConfig(level=logging.DEBUG)
from dpark.context import DparkContext
dc = DparkContext("local")
dc.start()
nums = dc.parallelize(range(100), 10)
tracker = CacheTracker(True)
tracker.registerRDD(nums.id, len(nums))
split = nums.splits[0]
print list(tracker.getOrCompute(nums, split))
print list(tracker.getOrCompute(nums, split))
print tracker.getLocationsSnapshot()
tracker.stop()
if __name__ == '__main__':
test()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> "_models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs: Any
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_06_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs: Any
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_06_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
|
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django import template
from django.core.exceptions import ValidationError
from django.contrib.admin import helpers
from django.contrib.admin.util import quote, unquote, capfirst
from django.contrib import messages
from django.utils.http import urlquote
from .patched.admin_utils import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db import router, models
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from filer.utils.compatibility import get_delete_permission
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext, ugettext_lazy
from .. import settings
from .forms import (CopyFilesAndFoldersForm, ResizeImagesForm,
RenameFilesForm)
from .permissions import PrimitivePermissionAwareModelAdmin
from ..views import (popup_status, popup_param, selectfolder_status,
selectfolder_param)
from .tools import (userperms_for_request,
check_folder_edit_permissions,
check_files_edit_permissions,
check_files_read_permissions,
check_folder_read_permissions)
from ..models import (Folder, FolderRoot, UnfiledImages, File, tools,
ImagesWithMissingData, FolderPermission, Image)
from ..settings import FILER_STATICMEDIA_PREFIX
from filer.utils.filer_easy_thumbnails import FilerActionThumbnailer
from filer.thumbnail_processors import normalize_subject_location
from django.conf import settings as django_settings
import os
import re
import itertools
class AddFolderPopupForm(forms.ModelForm):
folder = forms.HiddenInput()
class Meta:
model = Folder
fields = ('name',)
class FolderAdmin(PrimitivePermissionAwareModelAdmin):
list_display = ('name',)
exclude = ('parent',)
list_per_page = settings.MEDIA_PAGINATE_BY
list_filter = ('owner',)
search_fields = ['name', 'files__name']
raw_id_fields = ('owner',)
save_as = True # see ImageAdmin
actions = ['move_to_clipboard', 'files_set_public', 'files_set_private',
'delete_files_or_folders', 'move_files_and_folders',
'copy_files_and_folders', 'resize_images', 'rename_files']
directory_listing_template = 'admin/media/folder/directory_listing.html'
order_by_file_fields = ('_file_size', 'original_filename', 'name', 'owner',
'uploaded_at', 'modified_at')
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
parent_id = request.REQUEST.get('parent_id', None)
if parent_id:
return AddFolderPopupForm
else:
folder_form = super(FolderAdmin, self).get_form(
request, obj=None, **kwargs)
def folder_form_clean(form_obj):
cleaned_data = form_obj.cleaned_data
folders_with_same_name = Folder.objects.filter(
parent=form_obj.instance.parent,
name=cleaned_data['name'])
if form_obj.instance.pk:
folders_with_same_name = folders_with_same_name.exclude(
pk=form_obj.instance.pk)
if folders_with_same_name.exists():
raise ValidationError('Folder with this name already exists.')
return cleaned_data
# attach clean to the default form rather than defining a new form class
folder_form.clean = folder_form_clean
return folder_form
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
r = form.save(commit=False)
parent_id = request.REQUEST.get('parent_id', None)
if parent_id:
parent = Folder.objects.get(id=parent_id)
r.parent = parent
return r
def response_change(self, request, obj):
"""
Overrides the default to be able to forward to the directory listing
instead of the default change_list_view
"""
r = super(FolderAdmin, self).response_change(request, obj)
## Code borrowed from django ModelAdmin to determine changelist on the fly
if r['Location']:
# it was a successful save
if (r['Location'] in ['../'] or
r['Location'] == self._get_post_url(obj)):
if obj.parent:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.parent.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url,popup_param(request),
selectfolder_param(request,"&"))
return HttpResponseRedirect(url)
else:
# this means it probably was a save_and_continue_editing
pass
return r
def render_change_form(self, request, context, add=False, change=False,
form_url='', obj=None):
extra_context = {'show_delete': True,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),}
context.update(extra_context)
return super(FolderAdmin, self).render_change_form(
request=request, context=context, add=False,
change=False, form_url=form_url, obj=obj)
def delete_view(self, request, object_id, extra_context=None):
"""
Overrides the default to enable redirecting to the directory view after
deletion of a folder.
we need to fetch the object and find out who the parent is
before super, because super will delete the object and make it
impossible to find out the parent folder to redirect to.
"""
parent_folder = None
try:
obj = self.queryset(request).get(pk=unquote(object_id))
parent_folder = obj.parent
except self.model.DoesNotExist:
obj = None
r = super(FolderAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
if url in ["../../../../", "../../"] or url == self._get_post_url(obj):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url,popup_param(request),
selectfolder_param(request,"&"))
return HttpResponseRedirect(url)
return r
def icon_img(self, xs):
return mark_safe(('<img src="%simg/icons/plainfolder_32x32.png" ' + \
'alt="Folder Icon" />') % FILER_STATICMEDIA_PREFIX)
icon_img.allow_tags = True
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(FolderAdmin, self).get_urls()
from .. import views
url_patterns = patterns('',
# we override the default list view with our own directory listing
# of the root directories
url(r'^$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing-root'),
url(r'^last/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'last'},
name='filer-directory_listing-last'),
url(r'^(?P<folder_id>\d+)/list/$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing'),
url(r'^scan_folder/$',
self.admin_site.admin_view(views.scan_folder),
name='media-scan-folder'),
url(r'^(?P<folder_id>\d+)/scan_folder/$',
self.admin_site.admin_view(views.scan_folder),
name='media-scan-to-folder'),
url(r'^images_with_missing_data/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'images_with_missing_data'},
name='filer-directory_listing-images_with_missing_data'),
url(r'^unfiled_images/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'unfiled_images'},
name='filer-directory_listing-unfiled_images'),
url(r'^(?P<folder_id>\d+)/make_folder/$',
self.admin_site.admin_view(views.make_folder),
name='filer-directory_listing-make_folder'),
url(r'^make_folder/$',
self.admin_site.admin_view(views.make_folder),
name='filer-directory_listing-make_root_folder'),
)
url_patterns.extend(urls)
return url_patterns
# custom views
def directory_listing(self, request, folder_id=None, viewtype=None):
clipboard = tools.get_user_clipboard(request.user)
if viewtype == 'images_with_missing_data':
folder = ImagesWithMissingData()
elif viewtype == 'unfiled_images':
folder = UnfiledImages()
elif viewtype == 'last':
last_folder_id = request.session.get('filer_last_folder_id')
try:
Folder.objects.get(id=last_folder_id)
except Folder.DoesNotExist:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request,"&"))
else:
url = reverse('admin:filer-directory_listing', kwargs={'folder_id': last_folder_id})
url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request,"&"))
return HttpResponseRedirect(url)
elif folder_id is None:
folder = FolderRoot()
else:
folder = get_object_or_404(Folder, id=folder_id)
request.session['filer_last_folder_id'] = folder_id
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
# search
q = request.GET.get('q', None)
if q:
search_terms = unquote(q).split(" ")
else:
search_terms = []
q = ''
limit_search_to_folder = request.GET.get('limit_search_to_folder',
False) in (True, 'on')
if len(search_terms) > 0:
if folder and limit_search_to_folder and not folder.is_root:
folder_qs = folder.get_descendants()
file_qs = File.objects.filter(
folder__in=folder.get_descendants())
else:
folder_qs = Folder.objects.all()
file_qs = File.objects.all()
folder_qs = self.filter_folder(folder_qs, search_terms)
file_qs = self.filter_file(file_qs, search_terms)
show_result_count = True
else:
folder_qs = folder.media_folder_children.all()
file_qs = folder.files.all()
show_result_count = False
folder_qs = folder_qs.order_by('name')
order_by = request.GET.get('order_by', None)
if order_by is not None:
order_by = order_by.split(',')
order_by = [field for field in order_by
if re.sub(r'^-', '', field) in self.order_by_file_fields]
if len(order_by) > 0:
file_qs = file_qs.order_by(*order_by)
folder_children = []
folder_files = []
if folder.is_root:
folder_children += folder.virtual_folders
perms = FolderPermission.objects.get_read_id_list(request.user)
root_exclude_kw = {'parent__isnull': False, 'parent__id__in': perms}
if perms != 'All':
file_qs = file_qs.filter(Q(folder__id__in=perms) | Q(owner=request.user))
folder_qs = folder_qs.filter(Q(id__in=perms) | Q(owner=request.user))
else:
root_exclude_kw.pop('parent__id__in')
if folder.is_root:
folder_qs = folder_qs.exclude(**root_exclude_kw)
folder_children += folder_qs
folder_files += file_qs
try:
permissions = {
'has_edit_permission': folder.has_edit_permission(request),
'has_read_permission': folder.has_read_permission(request),
'has_add_children_permission': \
folder.has_add_children_permission(request),
}
except:
permissions = {}
if order_by is None or len(order_by) == 0:
folder_files.sort()
items = folder_children + folder_files
items_permissions = [(item, {'change': self.has_change_permission(request, item)}) for item in items]
paginator = Paginator(items_permissions, settings.MEDIA_PAGINATE_BY)
# Are we moving to clipboard?
if request.method == 'POST' and '_save' not in request.POST:
for f in folder_files:
if "move-to-clipboard-%d" % (f.id,) in request.POST:
clipboard = tools.get_user_clipboard(request.user)
if f.has_edit_permission(request):
tools.move_file_to_clipboard([f], clipboard)
return HttpResponseRedirect(request.get_full_path())
else:
raise PermissionDenied
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, files_queryset=file_qs, folders_queryset=folder_qs)
if response:
return response
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, files_queryset=file_qs, folders_queryset=folder_qs)
if response:
return response
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', paginator.count)
# If page request (9999) is out of range, deliver last page of results.
try:
paginated_items = paginator.page(request.GET.get('page', 1))
except PageNotAnInteger:
paginated_items = paginator.page(1)
except EmptyPage:
paginated_items = paginator.page(paginator.num_pages)
return render_to_response(
self.directory_listing_template,
{
'folder': folder,
'clipboard_files': File.objects.filter(
in_clipboards__clipboarditem__clipboard__user=request.user
).distinct(),
'paginator': paginator,
'paginated_items': paginated_items, # [(item, item_perms), ]
'permissions': permissions,
'permstest': userperms_for_request(folder, request),
'current_url': request.path,
'title': 'Directory listing for %s' % folder.name,
'search_string': ' '.join(search_terms),
'q': urlquote(q),
'show_result_count': show_result_count,
'limit_search_to_folder': limit_search_to_folder,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
# needed in the admin/base.html template for logout links
'root_path': reverse('admin:index'),
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(paginated_items.object_list)},
'selection_note_all': selection_note_all % {'total_count': paginator.count},
'media': self.media,
'enable_permissions': settings.FILER_ENABLE_PERMISSIONS,
'can_make_folder': request.user.is_superuser or \
(folder.is_root and settings.FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS) or \
permissions.get("has_add_children_permission"),
}, context_instance=RequestContext(request))
def filter_folder(self, qs, terms=[]):
for term in terms:
filters = Q(name__icontains=term)
for filter_ in self.get_owner_filter_lookups():
filters |= Q(**{filter_: term})
qs = qs.filter(filters)
return qs
def filter_file(self, qs, terms=[]):
for term in terms:
filters = (Q(name__icontains=term) |
Q(description__icontains=term) |
Q(original_filename__icontains=term))
for filter_ in self.get_owner_filter_lookups():
filters |= Q(**{filter_: term})
qs = qs.filter(filters)
return qs
@property
def owner_search_fields(self):
"""
Returns all the fields that are CharFields except for password from the
User model. For the built-in User model, that means username,
first_name, last_name, and email.
"""
try:
from django.contrib.auth import get_user_model
except ImportError: # Django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
return [
field.name for field in User._meta.fields
if isinstance(field, models.CharField) and field.name != 'password'
]
def get_owner_filter_lookups(self):
return [
'owner__{field}__icontains'.format(field=field)
for field in self.owner_search_fields
]
def response_action(self, request, files_queryset, folders_queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
return None
if not select_across:
selected_files = []
selected_folders = []
for pk in selected:
if pk[:5] == "file-":
selected_files.append(pk[5:])
else:
selected_folders.append(pk[7:])
# Perform the action only on the selected objects
files_queryset = files_queryset.filter(pk__in=selected_files)
folders_queryset = folders_queryset.filter(pk__in=selected_folders)
response = func(self, request, files_queryset, folders_queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg)
return None
def get_actions(self, request):
actions = super(FolderAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def _actions_column(self, instance):
_actions = super(MyFolderAdmin, self)._actions_column(instance)
sync_url = reverse('admin:media-scan-folder', args=[instance.id])
sync_action = '<a class="ajax-modal" href="{0}">{1}</a>'.format(sync_url, _('Scan folder'))
_actions.append(sync_action)
create_url = reverse('admin:media-make-folder', args=[instance.id])
create_action = '<a class="ajax-modal" href="{0}">{1}</a>'.format(create_url, _('Create Subfolder'))
_actions.append(create_action)
return _actions
def actions_column(self, instance):
return ' - '.join(self._actions_column(instance))
actions_column.allow_tags = True
actions_column.short_description = _('actions')
def move_to_clipboard(self, request, files_queryset, folders_queryset):
"""
Action which moves the selected files and files in selected folders to clipboard.
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
clipboard = tools.get_user_clipboard(request.user)
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
# TODO: Display a confirmation page if moving more than X files to clipboard?
files_count = [0] # We define it like that so that we can modify it inside the move_files function
def move_files(files):
files_count[0] += tools.move_file_to_clipboard(files, clipboard)
def move_folders(folders):
for f in folders:
move_files(f.files)
move_folders(f.media_folder_children.all())
move_files(files_queryset)
move_folders(folders_queryset)
self.message_user(request, _("Successfully moved %(count)d files to clipboard.") % {
"count": files_count[0],
})
return None
move_to_clipboard.short_description = ugettext_lazy("Move selected files to clipboard")
def files_set_public_or_private(self, request, set_public, files_queryset, folders_queryset):
"""
Action which enables or disables permissions for selected files and files in selected folders to clipboard (set them private or public).
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
files_count = [0] # We define it like that so that we can modify it inside the set_files function
def set_files(files):
for f in files:
if f.is_public != set_public:
f.is_public = set_public
f.save()
files_count[0] += 1
def set_folders(folders):
for f in folders:
set_files(f.files)
set_folders(f.children.all())
set_files(files_queryset)
set_folders(folders_queryset)
if set_public:
self.message_user(request, _("Successfully disabled permissions for %(count)d files.") % {
"count": files_count[0],
})
else:
self.message_user(request, _("Successfully enabled permissions for %(count)d files.") % {
"count": files_count[0],
})
return None
def files_set_private(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(request, False, files_queryset, folders_queryset)
files_set_private.short_description = ugettext_lazy("Enable permissions for selected files")
def files_set_public(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(request, True, files_queryset, folders_queryset)
files_set_public.short_description = ugettext_lazy("Disable permissions for selected files")
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
This action first displays a confirmation page whichs shows all the
deleteable files and/or folders, or, if the user has no permission on one of the related
childs (foreignkeys), a "permission denied" message.
Next, it delets all selected files and/or folders and redirects back to the folder.
"""
opts = self.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
all_protected = []
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
# Hopefully this also checks for necessary permissions.
# TODO: Check if permissions are really verified
using = router.db_for_write(self.model)
deletable_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
deletable_folders, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
n = files_queryset.count() + folders_queryset.count()
if n:
# delete all explicitly selected files
for f in files_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all files in all selected folders and their children
# This would happen automatically by ways of the delete cascade, but then the individual .delete()
# methods won't be called and the files won't be deleted from the filesystem.
folder_ids = set()
for folder in folders_queryset:
folder_ids.add(folder.id)
folder_ids.update(folder.get_descendants().values_list('id', flat=True))
for f in File.objects.filter(folder__in=folder_ids):
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all folders
for f in folders_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {
"count": n,
})
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = {
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the destination folder selection page
return render_to_response([
"admin/media/delete_selected_files_confirmation.html"
], context, context_instance=template.RequestContext(request))
delete_files_or_folders.short_description = ugettext_lazy("Delete selected files and/or folders")
# Copied from django.contrib.admin.util
def _format_callback(self, obj, user, admin_site, perms_needed):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.%s' % (opts.app_label,
get_delete_permission(opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe('%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
def _check_copy_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
except PermissionDenied:
return True
return False
def _check_move_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
except PermissionDenied:
return True
return False
def _get_current_action_folder(self, request, files_queryset, folders_queryset):
if files_queryset:
return files_queryset[0].folder
elif folders_queryset:
return folders_queryset[0].parent
else:
return None
def _list_folders_to_copy_or_move(self, request, folders):
for fo in folders:
yield self._format_callback(fo, request.user, self.admin_site, set())
children = list(self._list_folders_to_copy_or_move(request, fo.media_folder_children.all()))
children.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(fo.files)])
if children:
yield children
def _list_all_to_copy_or_move(self, request, files_queryset, folders_queryset):
to_copy_or_move = list(self._list_folders_to_copy_or_move(request, folders_queryset))
to_copy_or_move.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(files_queryset)])
return to_copy_or_move
def _list_all_destination_folders_recursive(self, request, folders_queryset, current_folder, folders, allow_self, level):
for fo in folders:
if not allow_self and fo in folders_queryset:
# We do not allow moving to selected folders or their descendants
continue
if not fo.has_read_permission(request):
continue
# We do not allow copying/moving back to the folder itself
enabled = (allow_self or fo != current_folder) and fo.has_add_children_permission(request)
yield (fo, (mark_safe((" " * level) + force_text(fo)), enabled))
for c in self._list_all_destination_folders_recursive(request, folders_queryset, current_folder, fo.media_folder_children.all(), allow_self, level + 1):
yield c
def _list_all_destination_folders(self, request, folders_queryset, current_folder, allow_self):
return list(self._list_all_destination_folders_recursive(request, folders_queryset, current_folder, FolderRoot().children, allow_self, 0))
def _move_files_and_folders_impl(self, files_queryset, folders_queryset, destination):
for f in files_queryset:
f.folder = destination
f.save()
for f in folders_queryset:
f.move_to(destination, 'last-child')
f.save()
def move_files_and_folders(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_move_perms(request, files_queryset, folders_queryset)
to_move = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
folders = self._list_all_destination_folders(request, folders_queryset, current_folder, False)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
try:
destination = Folder.objects.get(pk=request.POST.get('destination'))
except Folder.DoesNotExist:
raise PermissionDenied
folders_dict = dict(folders)
if destination not in folders_dict or not folders_dict[destination][1]:
raise PermissionDenied
# We count only topmost files and folders here
n = files_queryset.count() + folders_queryset.count()
conflicting_names = [folder.name for folder in Folder.objects.filter(
parent=destination,
name__in=folders_queryset.values('name'))]
if conflicting_names:
messages.error(request, _("Folders with names %s already exist at the selected "
"destination") % ", ".join(conflicting_names))
elif n:
self._move_files_and_folders_impl(files_queryset, folders_queryset, destination)
self.message_user(request, _("Successfully moved %(count)d files and/or folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
context = {
"title": _("Move files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Move files and/or folders"),
"to_move": to_move,
"destination_folders": folders,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the destination folder selection page
return render_to_response([
"admin/media/folder/choose_move_destination.html"
], context, context_instance=template.RequestContext(request))
move_files_and_folders.short_description = ugettext_lazy("Move selected files and/or folders")
def _rename_file(self, file_obj, form_data, counter, global_counter):
original_basename, original_extension = os.path.splitext(file_obj.original_filename)
if file_obj.name:
current_basename, current_extension = os.path.splitext(file_obj.name)
else:
current_basename = ""
current_extension = ""
file_obj.name = form_data['rename_format'] % {
'original_filename': file_obj.original_filename,
'original_basename': original_basename,
'original_extension': original_extension,
'current_filename': file_obj.name or "",
'current_basename': current_basename,
'current_extension': current_extension,
'current_folder': file_obj.folder.name,
'counter': counter + 1, # 1-based
'global_counter': global_counter + 1, # 1-based
}
file_obj.save()
def _rename_files(self, files, form_data, global_counter):
n = 0
for f in sorted(files):
self._rename_file(f, form_data, n, global_counter + n)
n += 1
return n
def _rename_folder(self, folder, form_data, global_counter):
return self._rename_files_impl(folder.files.all(), folder.media_folder_children.all(), form_data, global_counter)
def _rename_files_impl(self, files_queryset, folders_queryset, form_data, global_counter):
n = 0
for f in folders_queryset:
n += self._rename_folder(f, form_data, global_counter + n)
n += self._rename_files(files_queryset, form_data, global_counter + n)
return n
def rename_files(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_move_perms(request, files_queryset, folders_queryset)
to_rename = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = RenameFilesForm(request.POST)
if form.is_valid():
if files_queryset.count() + folders_queryset.count():
n = self._rename_files_impl(files_queryset, folders_queryset, form.cleaned_data, 0)
self.message_user(request, _("Successfully renamed %(count)d files.") % {
"count": n,
})
return None
else:
form = RenameFilesForm()
context = {
"title": _("Rename files"),
"instance": current_folder,
"breadcrumbs_action": _("Rename files"),
"to_rename": to_rename,
"rename_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the rename format selection page
return render_to_response([
"admin/media/folder/choose_rename_format.html"
], context, context_instance=template.RequestContext(request))
rename_files.short_description = ugettext_lazy("Rename files")
def _generate_new_filename(self, filename, suffix):
basename, extension = os.path.splitext(filename)
return basename + suffix + extension
def _copy_file(self, file_obj, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable (for different storage backends) way to overwrite files
raise NotImplementedError
# We are assuming here that we are operating on an already saved database objects with current database state available
filename = self._generate_new_filename(file_obj.file.name, suffix)
# Due to how inheritance works, we have to set both pk and id to None
file_obj.pk = None
file_obj.id = None
file_obj.save()
file_obj.folder = destination
file_obj.file = file_obj._copy_file(filename)
file_obj.original_filename = self._generate_new_filename(file_obj.original_filename, suffix)
file_obj.save()
def _copy_files(self, files, destination, suffix, overwrite):
for f in files:
self._copy_file(f, destination, suffix, overwrite)
return len(files)
def _get_available_name(self, destination, name):
count = itertools.count(1)
original = name
while destination.contains_folder(name):
name = "%s_%s" % (original, next(count))
return name
def _copy_folder(self, folder, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable (for different storage backends) way to overwrite files
raise NotImplementedError
# TODO: Should we also allow not to overwrite the folder if it exists, but just copy into it?
# TODO: Is this a race-condition? Would this be a problem?
foldername = self._get_available_name(destination, folder.name)
old_folder = Folder.objects.get(pk=folder.pk)
# Due to how inheritance works, we have to set both pk and id to None
folder.pk = None
folder.id = None
folder.name = foldername
folder.insert_at(destination, 'last-child', True) # We save folder here
for perm in FolderPermission.objects.filter(folder=old_folder):
perm.pk = None
perm.id = None
perm.folder = folder
perm.save()
return 1 + self._copy_files_and_folders_impl(old_folder.files.all(), old_folder.media_folder_children.all(), folder, suffix, overwrite)
def _copy_files_and_folders_impl(self, files_queryset, folders_queryset, destination, suffix, overwrite):
n = self._copy_files(files_queryset, destination, suffix, overwrite)
for f in folders_queryset:
n += self._copy_folder(f, destination, suffix, overwrite)
return n
def copy_files_and_folders(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_copy_perms(request, files_queryset, folders_queryset)
to_copy = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
folders = self._list_all_destination_folders(request, folders_queryset, current_folder, False)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = CopyFilesAndFoldersForm(request.POST)
if form.is_valid():
try:
destination = Folder.objects.get(pk=request.POST.get('destination'))
except Folder.DoesNotExist:
raise PermissionDenied
folders_dict = dict(folders)
if destination not in folders_dict or not folders_dict[destination][1]:
raise PermissionDenied
if files_queryset.count() + folders_queryset.count():
# We count all files and folders here (recursivelly)
n = self._copy_files_and_folders_impl(files_queryset, folders_queryset, destination, form.cleaned_data['suffix'], False)
self.message_user(request, _("Successfully copied %(count)d files and/or folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
else:
form = CopyFilesAndFoldersForm()
try:
selected_destination_folder = int(request.POST.get('destination', 0))
except ValueError:
if current_folder:
selected_destination_folder = current_folder.pk
else:
selected_destination_folder = 0
context = {
"title": _("Copy files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Copy files and/or folders"),
"to_copy": to_copy,
"destination_folders": folders,
"selected_destination_folder": selected_destination_folder,
"copy_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the destination folder selection page
return render_to_response([
"admin/media/folder/choose_copy_destination.html"
], context, context_instance=template.RequestContext(request))
copy_files_and_folders.short_description = ugettext_lazy("Copy selected files and/or folders")
def _check_resize_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
check_files_edit_permissions(request, files_queryset)
except PermissionDenied:
return True
return False
def _list_folders_to_resize(self, request, folders):
for fo in folders:
media_folder_children = list(self._list_folders_to_resize(request, fo.children.all()))
children.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(fo.files) if isinstance(f, Image)])
if children:
yield self._format_callback(fo, request.user, self.admin_site, set())
yield children
def _list_all_to_resize(self, request, files_queryset, folders_queryset):
to_resize = list(self._list_folders_to_resize(request, folders_queryset))
to_resize.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(files_queryset) if isinstance(f, Image)])
return to_resize
def _new_subject_location(self, original_width, original_height, new_width, new_height, x, y, crop):
# TODO: We could probably do better
return (round(new_width / 2), round(new_height / 2))
def _resize_image(self, image, form_data):
original_width = float(image.width)
original_height = float(image.height)
thumbnailer = FilerActionThumbnailer(file=image.file.file, name=image.file.name, source_storage=image.file.source_storage, thumbnail_storage=image.file.source_storage)
# This should overwrite the original image
new_image = thumbnailer.get_thumbnail({
'size': (form_data['width'], form_data['height']),
'crop': form_data['crop'],
'upscale': form_data['upscale'],
'subject_location': image.subject_location,
})
image.file.file = new_image.file
image.generate_sha1()
image.save() # Also gets new width and height
subject_location = normalize_subject_location(image.subject_location)
if subject_location:
(x, y) = subject_location
x = float(x)
y = float(y)
new_width = float(image.width)
new_height = float(image.height)
(new_x, new_y) = self._new_subject_location(original_width, original_height, new_width, new_height, x, y, form_data['crop'])
image.subject_location = "%d,%d" % (new_x, new_y)
image.save()
def _resize_images(self, files, form_data):
n = 0
for f in files:
if isinstance(f, Image):
self._resize_image(f, form_data)
n += 1
return n
def _resize_folder(self, folder, form_data):
return self._resize_images_impl(folder.files.all(), folder.filer-directory_listing.all(), form_data)
def _resize_images_impl(self, files_queryset, folders_queryset, form_data):
n = self._resize_images(files_queryset, form_data)
for f in folders_queryset:
n += self._resize_folder(f, form_data)
return n
def resize_images(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_resize_perms(request, files_queryset, folders_queryset)
to_resize = self._list_all_to_resize(request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = ResizeImagesForm(request.POST)
if form.is_valid():
if form.cleaned_data.get('thumbnail_option'):
form.cleaned_data['width'] = form.cleaned_data['thumbnail_option'].width
form.cleaned_data['height'] = form.cleaned_data['thumbnail_option'].height
form.cleaned_data['crop'] = form.cleaned_data['thumbnail_option'].crop
form.cleaned_data['upscale'] = form.cleaned_data['thumbnail_option'].upscale
if files_queryset.count() + folders_queryset.count():
# We count all files here (recursivelly)
n = self._resize_images_impl(files_queryset, folders_queryset, form.cleaned_data)
self.message_user(request, _("Successfully resized %(count)d images.") % {
"count": n,
})
return None
else:
form = ResizeImagesForm()
context = {
"title": _("Resize images"),
"instance": current_folder,
"breadcrumbs_action": _("Resize images"),
"to_resize": to_resize,
"resize_form": form,
"cmsplugin_enabled": 'cmsplugin_filer_image' in django_settings.INSTALLED_APPS,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the resize options page
return render_to_response([
"admin/media/folder/choose_images_resize_options.html"
], context, context_instance=template.RequestContext(request))
resize_images.short_description = ugettext_lazy("Resize selected images")
|
|
"""
Wheel command-line utility.
"""
import os
import hashlib
import sys
import json
import wheel.paths
from glob import iglob
from .. import signatures
from ..util import (urlsafe_b64decode, urlsafe_b64encode, native, binary,
matches_requirement)
from ..install import WheelFile
def require_pkgresources(name):
try:
import pkg_resources
except ImportError:
raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name))
import argparse
class WheelError(Exception): pass
# For testability
def get_keyring():
try:
from ..signatures import keys
import keyring
except ImportError:
raise WheelError("Install wheel[signatures] (requires keyring, dirspec) for signatures.")
return keys.WheelKeys, keyring
def keygen(get_keyring=get_keyring):
"""Generate a public/private key pair."""
WheelKeys, keyring = get_keyring()
ed25519ll = signatures.get_ed25519ll()
wk = WheelKeys().load()
keypair = ed25519ll.crypto_sign_keypair()
vk = native(urlsafe_b64encode(keypair.vk))
sk = native(urlsafe_b64encode(keypair.sk))
kr = keyring.get_keyring()
kr.set_password("wheel", vk, sk)
sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk))
if isinstance(kr, keyring.backend.BasicFileKeyring):
sys.stdout.write("in {0}\n".format(kr.file_path))
else:
sys.stdout.write("in %r\n" % kr.__class__)
sk2 = kr.get_password('wheel', vk)
if sk2 != sk:
raise WheelError("Keyring is broken. Could not retrieve secret key.")
sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk))
wk.add_signer('+', vk)
wk.trust('+', vk)
wk.save()
def sign(wheelfile, replace=False, get_keyring=get_keyring):
"""Sign a wheel"""
WheelKeys, keyring = get_keyring()
ed25519ll = signatures.get_ed25519ll()
wf = WheelFile(wheelfile, append=True)
wk = WheelKeys().load()
name = wf.parsed_filename.group('name')
sign_with = wk.signers(name)[0]
sys.stdout.write("Signing {0} with {1}\n".format(name, sign_with[1]))
vk = sign_with[1]
kr = keyring.get_keyring()
sk = kr.get_password('wheel', vk)
keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)),
urlsafe_b64decode(binary(sk)))
record_name = wf.distinfo_name + '/RECORD'
sig_name = wf.distinfo_name + '/RECORD.jws'
if sig_name in wf.zipfile.namelist():
raise WheelError("Wheel is already signed.")
record_data = wf.zipfile.read(record_name)
payload = {"hash":"sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))}
sig = signatures.sign(payload, keypair)
wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True))
wf.zipfile.close()
def unsign(wheelfile):
"""
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
"""
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close()
def verify(wheelfile):
"""Verify a wheel.
The signature will be verified for internal consistency ONLY and printed.
Wheel's own unpack/install commands verify the manifest against the
signature and file contents.
"""
wf = WheelFile(wheelfile)
sig_name = wf.distinfo_name + '/RECORD.jws'
sig = json.loads(native(wf.zipfile.open(sig_name).read()))
verified = signatures.verify(sig)
sys.stderr.write("Signatures are internally consistent.\n")
sys.stdout.write(json.dumps(verified, indent=2))
sys.stdout.write('\n')
def unpack(wheelfile, dest='.'):
"""Unpack a wheel.
Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
is the package name and {ver} its version.
:param wheelfile: The path to the wheel.
:param dest: Destination directory (default to current directory).
"""
wf = WheelFile(wheelfile)
namever = wf.parsed_filename.group('namever')
destination = os.path.join(dest, namever)
sys.stderr.write("Unpacking to: %s\n" % (destination))
wf.zipfile.extractall(destination)
wf.zipfile.close()
def install(requirements, requirements_file=None,
wheel_dirs=None, force=False, list_files=False,
dry_run=False):
"""Install wheels.
:param requirements: A list of requirements or wheel files to install.
:param requirements_file: A file containing requirements to install.
:param wheel_dirs: A list of directories to search for wheels.
:param force: Install a wheel file even if it is not compatible.
:param list_files: Only list the files to install, don't install them.
:param dry_run: Do everything but the actual install.
"""
# If no wheel directories specified, use the WHEELPATH environment
# variable, or the current directory if that is not set.
if not wheel_dirs:
wheelpath = os.getenv("WHEELPATH")
if wheelpath:
wheel_dirs = wheelpath.split(os.pathsep)
else:
wheel_dirs = [ os.path.curdir ]
# Get a list of all valid wheels in wheel_dirs
all_wheels = []
for d in wheel_dirs:
for w in os.listdir(d):
if w.endswith('.whl'):
wf = WheelFile(os.path.join(d, w))
if wf.compatible:
all_wheels.append(wf)
# If there is a requirements file, add it to the list of requirements
if requirements_file:
# If the file doesn't exist, search for it in wheel_dirs
# This allows standard requirements files to be stored with the
# wheels.
if not os.path.exists(requirements_file):
for d in wheel_dirs:
name = os.path.join(d, requirements_file)
if os.path.exists(name):
requirements_file = name
break
with open(requirements_file) as fd:
requirements.extend(fd)
to_install = []
for req in requirements:
if req.endswith('.whl'):
# Explicitly specified wheel filename
if os.path.exists(req):
wf = WheelFile(req)
if wf.compatible or force:
to_install.append(wf)
else:
msg = ("{0} is not compatible with this Python. "
"--force to install anyway.".format(req))
raise WheelError(msg)
else:
# We could search on wheel_dirs, but it's probably OK to
# assume the user has made an error.
raise WheelError("No such wheel file: {}".format(req))
continue
# We have a requirement spec
# If we don't have pkg_resources, this will raise an exception
matches = matches_requirement(req, all_wheels)
if not matches:
raise WheelError("No match for requirement {}".format(req))
to_install.append(max(matches))
# We now have a list of wheels to install
if list_files:
sys.stdout.write("Installing:\n")
if dry_run:
return
for wf in to_install:
if list_files:
sys.stdout.write(" {0}\n".format(wf.filename))
continue
wf.install(force=force)
wf.zipfile.close()
def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distribution.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except ImportError:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
command = easy_install.easy_install(install.distribution)
command.args = ['wheel'] # dummy argument
command.finalize_options()
command.install_egg_scripts(pkg_resources_dist)
def convert(installers, dest_dir, verbose):
require_pkgresources('wheel convert')
# Only support wheel convert if pkg_resources is present
from ..wininst2wheel import bdist_wininst2wheel
from ..egg2wheel import egg2wheel
for pat in installers:
for installer in iglob(pat):
if os.path.splitext(installer)[1] == '.egg':
conv = egg2wheel
else:
conv = bdist_wininst2wheel
if verbose:
sys.stdout.write("{0}... ".format(installer))
sys.stdout.flush()
conv(installer, dest_dir)
if verbose:
sys.stdout.write("OK\n")
def parser():
p = argparse.ArgumentParser()
s = p.add_subparsers(help="commands")
def keygen_f(args):
keygen()
keygen_parser = s.add_parser('keygen', help='Generate signing key')
keygen_parser.set_defaults(func=keygen_f)
def sign_f(args):
sign(args.wheelfile)
sign_parser = s.add_parser('sign', help='Sign wheel')
sign_parser.add_argument('wheelfile', help='Wheel file')
sign_parser.set_defaults(func=sign_f)
def unsign_f(args):
unsign(args.wheelfile)
unsign_parser = s.add_parser('unsign', help=unsign.__doc__)
unsign_parser.add_argument('wheelfile', help='Wheel file')
unsign_parser.set_defaults(func=unsign_f)
def verify_f(args):
verify(args.wheelfile)
verify_parser = s.add_parser('verify', help=verify.__doc__)
verify_parser.add_argument('wheelfile', help='Wheel file')
verify_parser.set_defaults(func=verify_f)
def unpack_f(args):
unpack(args.wheelfile, args.dest)
unpack_parser = s.add_parser('unpack', help='Unpack wheel')
unpack_parser.add_argument('--dest', '-d', help='Destination directory',
default='.')
unpack_parser.add_argument('wheelfile', help='Wheel file')
unpack_parser.set_defaults(func=unpack_f)
def install_f(args):
install(args.requirements, args.requirements_file,
args.wheel_dirs, args.force, args.list_files)
install_parser = s.add_parser('install', help='Install wheels')
install_parser.add_argument('requirements', nargs='*',
help='Requirements to install.')
install_parser.add_argument('--force', default=False,
action='store_true',
help='Install incompatible wheel files.')
install_parser.add_argument('--wheel-dir', '-d', action='append',
dest='wheel_dirs',
help='Directories containing wheels.')
install_parser.add_argument('--requirements-file', '-r',
help="A file containing requirements to "
"install.")
install_parser.add_argument('--list', '-l', default=False,
dest='list_files',
action='store_true',
help="List wheels which would be installed, "
"but don't actually install anything.")
install_parser.set_defaults(func=install_f)
def install_scripts_f(args):
install_scripts(args.distributions)
install_scripts_parser = s.add_parser('install-scripts', help='Install console_scripts')
install_scripts_parser.add_argument('distributions', nargs='*',
help='Regenerate console_scripts for these distributions')
install_scripts_parser.set_defaults(func=install_scripts_f)
def convert_f(args):
convert(args.installers, args.dest_dir, args.verbose)
convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel')
convert_parser.add_argument('installers', nargs='*', help='Installers to convert')
convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
convert_parser.add_argument('--verbose', '-v', action='store_true')
convert_parser.set_defaults(func=convert_f)
def version_f(args):
from .. import __version__
sys.stdout.write("wheel %s\n" % __version__)
version_parser = s.add_parser('version', help='Print version and exit')
version_parser.set_defaults(func=version_f)
def help_f(args):
p.print_help()
help_parser = s.add_parser('help', help='Show this help')
help_parser.set_defaults(func=help_f)
return p
def main():
p = parser()
args = p.parse_args()
if not hasattr(args, 'func'):
p.print_help()
else:
# XXX on Python 3.3 we get 'args has no func' rather than short help.
try:
args.func(args)
return 0
except WheelError as e:
sys.stderr.write(e.message + "\n")
return 1
|
|
import sys
import time
import profile
#sys.path.insert(0, "..")
#sys.path.insert(0, "ddglib")
from klab import colortext
from klab.deprecated import rosettadb
from klab.debug.profile import ProfileTimer
from kddg.api import db, dbi
from ddglib import help as ddg_help
from ddglib.ddgfilters import *
from klab import pdb
import klab.deprecated.rosettahelper
def simpleRunExample(self):
# Step 1: Open a database connection
ddGdb = dbi.ddGDatabase()
# Step 2: Select database records
sr = StructureResultSet(ddGdb, AdditionalIDs = ['2BQC', '1LAW', '1LHH', '1LHI'])
# Step 3: Add filters
sr.addFilter(StructureFilter.TotalBFactors(0,16) | StructureFilter.WithNullResolution(True))
# Step 4: Retrieve full database records.
# results will be a list each of whose elements is a dict representing a database record.
results = sr.getFilteredResults()
# Step 5: Optionally print summary
print("\nSummary: %s\n" % sr)
def help():
ddg_help.ShowDatabaseStructure()
ddg_help.ShowResultSet()
ddg_help.ShowFilter()
def dump_zip(jobnumber):
ddG_connection = db.ddG()
ddG_connection.dumpData("testzip-%d.zip" % jobnumber, jobnumber)
class JobRunner:
# Class to contain old code used to kick off jobs
#e.g.
# JobRunner.addLinsJobs("lin-3K0NB", "Kellogg:10.1002/prot.22921:protocol16:32231")
# JobRunner.runLizsSet("lizsettest1", "Kellogg:10.1002/prot.22921:protocol16:32231")
@staticmethod
def addLinsJobs(PredictionSet, ProtocolID):
raise colortext.Exception("Do you really want to run this?")
colortext.printf("\nAdding Lin's mutations to %s prediction set." % PredictionSet, "lightgreen")
KeepHETATMLines = False
FilterTester.openDB()
# Filter by the DummySource set of experiments
er1 = ExperimentResultSet(ddGdb)
ef1 = ExperimentFilter()
ef1.setSource(ExperimentFilter.DummySource)
er1.addFilter(ef1)
# Filter by the particular PDB
sr = StructureResultSet(ddGdb, 'WHERE PDB_ID="3K0NB_lin"')
er1 = ExperimentResultSet.fromIDs(ddGdb, er1.getFilteredIDs()).filterBySet(sr)
FilterTester.test(er1)
experimentIDs = sorted(list(er1.getFilteredIDs()))
colortext.message("\nThe number of unique experiments is %d.\n" % len(experimentIDs))
ddG_connection = db.ddG()
count = 0
for experimentID in experimentIDs:
ddG_connection.addPrediction(experimentID, PredictionSet, ProtocolID, KeepHETATMLines, StoreOutput = True)
count += 1
if count >= 10:
colortext.write(".")
colortext.flush()
count = 0
print("")
@staticmethod
def runLizsSet(PredictionSet, ProtocolID):
raise colortext.Exception("Do you really want to run this?")
colortext.printf("\nAdding Liz's data set to %s prediction set." % PredictionSet, "lightgreen")
KeepHETATMLines = False
FilterTester.openDB()
# Filter by the DummySource set of experiments
er1 = ExperimentResultSet(ddGdb)
ef1 = ExperimentFilter()
ef1.setSource(ExperimentFilter.LizKellogg)
er1.addFilter(ef1)
FilterTester.test(er1)
experimentIDs = sorted(list(er1.getFilteredIDs()))
colortext.message("\nThe number of unique experiments is %d.\n" % len(experimentIDs))
ddG_connection = db.ddG()
count = 0
for experimentID in experimentIDs:
ddG_connection.addPrediction(experimentID, PredictionSet, ProtocolID, KeepHETATMLines, StoreOutput = True)
count += 1
if count >= 10:
colortext.write(".")
colortext.flush()
count = 0
print("")
@staticmethod
def addAllMutationsForAGivenPDB1():
'''Used to create dummy Experiment records for Lin's DDG run. This should probably be an API function.'''
ddG_connection = db.ddG()
opdb = common.pdb.PDB("3K0NA_lin.pdb")
count = 1
for chainresidueid, wt in sorted(opdb.ProperResidueIDToAAMap().iteritems()):
chain = chainresidueid[0]
residueid = chainresidueid[1:].strip()
allotherAAs = sorted([aa for aa in klab.deprecated.rosettahelper.ROSETTAWEB_SK_AAinv.keys() if aa != wt])
for otherAA in allotherAAs:
ms = db.MutationSet()
ms.addMutation(chain, residueid, wt, otherAA)
print("3K0NA_lin", ms, ms.getChains(), count, 0)
ddG_connection.createDummyExperiment("3K0NA_lin", ms, ms.getChains(), count, 0, ExperimentSetName = "DummySource")
count += 1
@staticmethod
def addAllMutationsForAGivenPDB2():
'''Used to create dummy Experiment records for Lin's DDG run. This should probably be an API function.'''
ddG_connection = db.ddG()
opdb = common.pdb.PDB("3K0On_lin.pdb")
count = 3098
for chainresidueid, wt in sorted(opdb.ProperResidueIDToAAMap().iteritems()):
chain = chainresidueid[0]
residueid = chainresidueid[1:].strip()
allotherAAs = sorted([aa for aa in klab.deprecated.rosettahelper.ROSETTAWEB_SK_AAinv.keys() if aa != wt])
for otherAA in allotherAAs:
ms = db.MutationSet()
ms.addMutation(chain, residueid, wt, otherAA)
print("3K0On_lin", ms, ms.getChains(), count, 0)
ddG_connection.createDummyExperiment("3K0On_lin", ms, ms.getChains(), count, 0, ExperimentSetName = "DummySource")
count += 1
@staticmethod
def addAllMutationsForAGivenPDB3():
'''Used to create dummy Experiment records for Lin's DDG run. This should probably be an API function.'''
FilterTester.openDB()
ddG_connection = db.ddG()
opdb = common.pdb.PDB("pdbs/3K0NB_lin.pdb")
results = ddGdb.execute('''SELECT SourceID FROM ExperimentScore INNER JOIN Experiment ON ExperimentScore.ExperimentID = Experiment.ID WHERE Source="DummySource"''', cursorClass=dbi.StdCursor)
assert(results)
highestID = max([int(r[0]) for r in results])
count = highestID + 1
for chainresidueid, wt in sorted(opdb.ProperResidueIDToAAMap().iteritems()):
chain = chainresidueid[0]
residueid = chainresidueid[1:].strip()
allotherAAs = sorted([aa for aa in klab.deprecated.rosettahelper.ROSETTAWEB_SK_AAinv.keys() if aa != wt])
for otherAA in allotherAAs:
ms = db.MutationSet()
ms.addMutation(chain, residueid, wt, otherAA)
print("3K0NB_lin", ms, ms.getChains(), count, 0, chain, wt, residueid, otherAA)
ddG_connection.createDummyExperiment("3K0NB_lin", ms, ms.getChains(), count, 0, ExperimentSetName = "DummySource")
count += 1
class Analyzer:
# Class to contain old code used to kick off analysis
@staticmethod
def testAnalysis():
ddG_connection = db.ddG()
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet='kellogg16-A' AND Status='done' LIMIT 2000")
ddG_connection.analyze(pr)
@staticmethod
def testAnalysis2():
ddG_connection = db.ddG()
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet='lizsettest1' AND Status='done' LIMIT 2000")
ddG_connection.analyze(pr)
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet='lizsettest1' AND Status='done' LIMIT 2000")
pr.addFilter(ExperimentFilter.MutationsBetweenAminoAcidSizes(ExperimentFilter.large, ExperimentFilter.small))
FilterTester.test(pr)
ddG_connection.analyze(pr)
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet='lizsettest1' AND Status='done' LIMIT 2000")
pr.addFilter(ExperimentFilter.MutationsBetweenAminoAcidSizes(ExperimentFilter.small, ExperimentFilter.large))
FilterTester.test(pr)
ddG_connection.analyze(pr)
class FilterTester:
@staticmethod
def profile(command_):
t1 = time.time()
profile.run(command_, sort = 'cumulative')
print("** Total time taken in %s: %0.2f **" % (command_, time.time() - t1))
@staticmethod
def test(resultset, expected_size = None):
print("Applying filters")
results = resultset.getFilteredResults(just_get_primary_keys = True)
print(len(results), expected_size)
assert(len(results) == expected_size)
print("After application")
print("\nSummary: %s\n" % resultset)
@staticmethod
def openDB():
if not globals().get("ddGdb"):
globals()["ddGdb"] = dbi.ddGDatabase()
total_number_of_experiments = ddGdb.execute('SELECT COUNT(ID) AS C FROM Experiment')[0]['C']
globals()["total_number_of_experiments"] = total_number_of_experiments
# UnionFilter examples
@staticmethod
def unionFilterExample1():
t1 = time.time()
print("** All structures with null OR non-null resolution **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb)
sr.addFilter(StructureFilter.WithNullResolution(False) | StructureFilter.WithNullResolution(True))
FilterTester.test(sr, 848)
@staticmethod
def unionFilterExample2():
print("** All structures with null AND non-null resolution**")
FilterTester.openDB()
sr = StructureResultSet(ddGdb)
sr.addFilter(StructureFilter.WithNullResolution(False))
sr.addFilter(StructureFilter.WithNullResolution(True))
FilterTester.test(sr, 0)
# StructureResultSet examples
@staticmethod
def allStructures():
'''Select all Structure records.'''
print("** All structures **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb)
FilterTester.test(sr, 848)
@staticmethod
def getStructuresWithNullResolutionSQL():
print("** All structures with null resolution **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb, SQL = "WHERE Resolution IS NULL")
FilterTester.test(sr, 95)
@staticmethod
def getStructuresWithNullResolutionFilter():
print("** All structures with null resolution **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb)
sr.addFilter(StructureFilter.WithNullResolution(True))
FilterTester.test(sr, 95)
@staticmethod
def pickSpecific():
'''Select four specific Structure records and apply a filter.'''
print("** 4 specific structures **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb, AdditionalIDs = ['2BQC', '1LAW', '1LHH', '1LHI'])
sr.addFilter(StructureFilter.TotalBFactors(0,16) | StructureFilter.WithNullResolution(True))
FilterTester.test(sr, 2)
@staticmethod
def getStructuresInResolutionRange():
print("** All structures with null resolution **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb)
sr.addFilter(StructureFilter.Resolution(1, 2))
FilterTester.test(sr, 512)
@staticmethod
def getStructuresWithUniProtIDs():
print("** All structures with null resolution **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb)
sr.addFilter(StructureFilter.WithUniProtIDs(["P0A7Y4"], ["RNH_ECOLI", "RNP30_RANPI"]))
FilterTester.test(sr, 15)
@staticmethod
def getStructuresFilteredByStructures():
'''Select all Structure records.'''
print("** Experiments filtered by structures **")
FilterTester.openDB()
sr1 = StructureResultSet(ddGdb, SQL = "WHERE PDB_ID LIKE %s", parameters = "1A%")
FilterTester.test(sr1, 53)
sr2 = StructureResultSet(ddGdb, SQL = "WHERE PDB_ID LIKE %s", parameters = "1AY%")
FilterTester.test(sr2, 2)
sr = sr1.filterBySet(sr2)
FilterTester.test(sr, 2)
# ExperimentResultSet examples
@staticmethod
def getExperimentsWithSQL():
'''Select all Structure records.'''
print("** All structures **")
FilterTester.openDB()
er = ExperimentResultSet(ddGdb, SQL = "WHERE Structure LIKE %s", parameters = "1A%")
FilterTester.test(er, 287)
er.addFilter(StructureFilter.Resolution(1, 1.7))
FilterTester.test(er, 1)
@staticmethod
def getExperimentsFilteredByStructures():
'''Select all Structure records.'''
print("** Experiments filtered by structures **")
FilterTester.openDB()
sr = StructureResultSet(ddGdb, SQL = "WHERE PDB_ID LIKE %s", parameters = "1AY%")
FilterTester.test(sr, 2)
er = ExperimentResultSet(ddGdb, SQL = "WHERE Structure LIKE %s", parameters = "1A%")
FilterTester.test(er, 287)
er = er.filterBySet(sr)
FilterTester.test(er, 30)
er = ExperimentResultSet(ddGdb, SQL = "WHERE Structure LIKE %s", parameters = "1AY%")
FilterTester.test(er, 30)
#print(er.structure_map.keys())
er.addFilter(StructureFilter.Resolution(1, 1.80))
FilterTester.test(er, 19)
er.addFilter(StructureFilter.Resolution(1, 1.70))
FilterTester.test(er, 0)
@staticmethod
def getExperimentsFilteredBySource():
'''Select all Structure records.'''
print("** Experiments filtered by structures **")
FilterTester.openDB()
er = ExperimentResultSet(ddGdb)
FilterTester.test(er, 14151)
er.addFilter(ExperimentFilter.OnSource(ExperimentFilter.ProTherm))
FilterTester.test(er)
@staticmethod
def getExperimentsFilteredByMutationSize():
'''Select all Structure records.'''
print("** Experiments filtered by mutation size **")
FilterTester.openDB()
total_count = 13208
filters_matrix = [
(ExperimentFilter.large, ExperimentFilter.large, 3098),
(ExperimentFilter.large, ExperimentFilter.small, 3401),
(ExperimentFilter.small, ExperimentFilter.large, 3262),
(ExperimentFilter.small, ExperimentFilter.small, 3447),
]
assert(total_count == sum([f[2] for f in filters_matrix]))
for f in filters_matrix:
er = ExperimentResultSet(ddGdb)
er.addFilter(ExperimentFilter.NumberOfMutations(1, 1))
FilterTester.test(er, total_count)
er.addFilter(ExperimentFilter.MutationsBetweenAminoAcidSizes(f[0], f[1]))
FilterTester.test(er, f[2])
@staticmethod
def getExperimentsFilteredByMutationSize_faster():
'''Another example of speedups using compound filters rather than specific filters.
Time for getExperimentsFilteredByMutationSize on my machine: @1.93s
Time for getExperimentsFilteredByMutationSize_faster on my machine: @1.65s
For the getExperimentsFilteredByMutationSize run, I disabled the first FilterTester.test call (otherwise it takes @2.8s).
'''
print("** Experiments filtered by mutation size **")
FilterTester.openDB()
total_count = 13208
filters_matrix = [
(ExperimentFilter.large, ExperimentFilter.large, 3098),
(ExperimentFilter.large, ExperimentFilter.small, 3401),
(ExperimentFilter.small, ExperimentFilter.large, 3262),
(ExperimentFilter.small, ExperimentFilter.small, 3447),
]
assert(total_count == sum([f[2] for f in filters_matrix]))
for f in filters_matrix:
er = ExperimentResultSet(ddGdb)
ef = ExperimentFilter()
ef.setNumberOfMutations(1, 1)
ef.setAminoAcidSizes(f[0], f[1])
er.addFilter(ef)
FilterTester.test(er, f[2])
@staticmethod
def getExperimentsFilteredByAminoAcids1():
'''Select all Structure records.'''
print("** Experiments filtered by residue (from ALA) **")
FilterTester.openDB()
er = ExperimentResultSet(ddGdb)
FilterTester.test(er, total_number_of_experiments)
er.addFilter(ExperimentFilter.MutationsBetweenAminoAcids('ALA', 'G'))
FilterTester.test(er, 144)
@staticmethod
def getExperimentsFilteredByAminoAcids2():
'''Select all Structure records.'''
print("** Experiments filtered by residue (from ALA) **")
FilterTester.openDB()
er = ExperimentResultSet(ddGdb)
FilterTester.test(er, total_number_of_experiments)
er.addFilter(ExperimentFilter.MutationsBetweenAminoAcids('A', 'GLY'))
FilterTester.test(er, 144)
@staticmethod
def getExperimentsFilteredByResolution():
'''Select all Structure records.'''
print("** Experiments filtered by structures **")
FilterTester.openDB()
er = ExperimentResultSet(ddGdb)
FilterTester.test(er, total_number_of_experiments)
er.addFilter(StructureFilter.Resolution(1, 2))
FilterTester.test(er, 973)
@staticmethod
def getExperimentsFilteredBySourceAndResolution():
'''Select all Structure records.'''
print("** Experiments filtered by structures **")
FilterTester.openDB()
er = ExperimentResultSet(ddGdb)
FilterTester.test(er, total_number_of_experiments)
er.addFilter(ExperimentFilter.OnSource(ExperimentFilter.ProTherm))
FilterTester.test(er)
er.addFilter(StructureFilter.Resolution(1, 2))
FilterTester.test(er)
# PredictionResultSet examples
@staticmethod
def getAllPredictions():
'''Select all Prediction records.'''
print("** All predictions **")
FilterTester.openDB()
pr = PredictionResultSet(ddGdb)
FilterTester.test(pr, 14373)
@staticmethod
def getPredictionsWithSQL():
'''Select all Structure records.'''
print("** Specific prediction **")
FilterTester.openDB()
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet=%s AND ID=14061", parameters = "lin-3K0NA")
FilterTester.test(pr, 1)
@staticmethod
def getPredictionsUsingMultipleFilters():
'''This demonstrates the use of multiple filters.'''
print("** Multiple filter example **")
FilterTester.openDB()
t1 = time.time()
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet=%s", parameters = "AllExperimentsProtocol16")
print(time.time() - t1)
t1 = time.time()
pr.addFilter(StructureFilter.Techniques(StructureFilter.XRay))
print(time.time() - t1)
t1 = time.time()
pr.addFilter(StructureFilter.Resolution(1, 1.5) | StructureFilter.Resolution(3.9, 4))
print(time.time() - t1)
t1 = time.time()
pr.addFilter(StructureFilter.TotalBFactors(0, 10))
print(time.time() - t1)
t1 = time.time()
FilterTester.test(pr, 30)
print(time.time() - t1)
t1 = time.time()
@staticmethod
def getPredictionsUsingMultipleFilters_Speed():
'''This demonstrates how slow separate filters are. Separate filters query the entire table whereas single
filters with multiple criteria drill down further and further, working on subsets of the table.'''
print("** Multiple filter example **")
FilterTester.openDB()
t1 = time.time()
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet=%s", parameters = "AllExperimentsProtocol16")
pr.addFilter(StructureFilter.Techniques(StructureFilter.XRay))
pr.addFilter(StructureFilter.Resolution(1, 1.5) | StructureFilter.Resolution(3.9, 4))
pr.addFilter(StructureFilter.TotalBFactors(0, 10))
FilterTester.test(pr, 30)
t2 = time.time()
print("Time taken: %0.2fs" % (t2 - t1))
t1 = time.time()
pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet=%s", parameters = "AllExperimentsProtocol16")
sf = StructureFilter()
sf.setTechniques(StructureFilter.XRay)
sf.setResolution(1, 1.5)
sf.setTotalBFactors(0, 10)
pr.addFilter(sf | StructureFilter.Resolution(3.9, 4))
FilterTester.test(pr, 30)
t2 = time.time()
print("Time taken: %0.2fs" % (t2 - t1))
@staticmethod
def showResultSetOperations():
'''Demonstrates how to union, intersect, subtract, and XOR ResultSets.'''
range_size1 = 14
range_size2 = 187
range_size3 = 506
range_size4 = 8
print("\n** ResultSet SR1 **\n")
FilterTester.openDB()
sr1 = StructureResultSet(ddGdb)
sr1.addFilter(StructureFilter.Resolution(1, 1.3))
FilterTester.test(sr1, 14)
print("\n** ResultSet SR2 **\n")
sr2 = StructureResultSet(ddGdb)
sr2.addFilter(StructureFilter.Resolution(2, 2.3))
FilterTester.test(sr2, 187)
print("\n** ResultSet SR3 **\n")
sr3 = StructureResultSet(ddGdb)
sr3.addFilter(StructureFilter.Resolution(1.2, 2))
FilterTester.test(sr3, 506)
print("\n** ResultSet union - SR1 | SR2 **\n")
srUnion = sr1 | sr2
assert(len(srUnion) == range_size1 + range_size2)
print(join(srUnion._log, "\n"))
print("\n** ResultSet union - SR1 - SR3 **\n")
srUnion = sr1 - sr3
print(join(srUnion._log, "\n"))
print("\n** ResultSet intersection - SR1 & SR3 **\n")
srIntersection = sr1 & sr3
print(join(srIntersection._log, "\n"))
print("\n** ResultSet difference and union sanity check **\n")
assert(len(srUnion) + len(srIntersection) == range_size1)
print("\n** ResultSet intersection sanity check **\n")
sr4 = StructureResultSet(ddGdb)
sr4.addFilter(StructureFilter.Resolution(1.2, 1.3))
FilterTester.test(sr4, range_size4)
print("\n** ResultSet difference - SR1 - SR3 **\n")
srDifference = sr1 / sr3
print(join(srDifference._log, "\n"))
print("\n** ResultSet exclusive or - SR1 ^ SR3 **\n")
srXOR = sr1 ^ sr3
print(join(srXOR._log, "\n"))
print("\n** ResultSet exclusive or sanity check **\n")
assert(len(srXOR) == (range_size1 - len(srIntersection)) + (range_size3 - len(srIntersection)))
@staticmethod
def showAllEligibleProTherm(PredictionSet, ProtocolID, KeepHETATMLines):
#inserter = JobInserter()
colortext.printf("\nAdding ProTherm mutations to %s prediction set." % PredictionSet, "lightgreen")
#ddGdb = dbi.ddGDatabase()
MAX_RESOLUTION = 2.1
MAX_NUMRES_PROTHERM = 350
MAX_STANDARD_DEVIATION = 1.0
FilterTester.openDB()
if False:
t1 = time.time()
er1 = ExperimentResultSet(ddGdb)
er1.addFilter(ExperimentFilter.OnSource(ExperimentFilter.ProTherm))
er1.addFilter(ExperimentFilter.NumberOfMutations(1, 1))
er1.addFilter(ExperimentFilter.NumberOfChains(1, 1))
er1.addFilter(ExperimentFilter.StandardDeviation(None, MAX_STANDARD_DEVIATION))
er1.addFilter(StructureFilter.Resolution(None, MAX_RESOLUTION))
er1.addFilter(StructureFilter.Techniques(StructureFilter.XRay))
FilterTester.test(er1)
t2 = time.time()
print(t2 - t1)
# This method usually takes around 65% of the time as the method above
t1 = time.time()
ef1 = ExperimentFilter()
ef1.setSource(ExperimentFilter.ProTherm)
er1 = ExperimentResultSet(ddGdb)
er1.addFilter(ExperimentFilter.OnSource(ExperimentFilter.ProTherm))
FilterTester.test(er1)
ef1.setNumberOfMutations(1, 1)
ef1.setNumberOfChains(1, 1)
ef1.setStandardDeviation(None, MAX_STANDARD_DEVIATION)
sf1 = StructureFilter()
sf1.setResolution(None, MAX_RESOLUTION)
sf1.setTechniques(StructureFilter.XRay)
er1 = ExperimentResultSet(ddGdb)
er1.addFilter(ef1)
er1.addFilter(sf1)
FilterTester.test(er1)
t2 = time.time()
print(t2 - t1)
experimentIDs = sorted(list(er1.getFilteredIDs()))
colortext.message("\nThe number of unique ProTherm experiments with:\n\t- one mutation;\n\t- structures solved by X-ray diffraction and with <= %d residues;\n\t- a maximum standard deviation in experimental results of <= %0.2f;\n\t- and a resolution of <= %0.2f Angstroms.\nis %d.\n" % (MAX_NUMRES_PROTHERM, MAX_STANDARD_DEVIATION, MAX_RESOLUTION, len(experimentIDs)))
ddG_connection = db.ddG()
count = 0
sys.exit(0)
print("")
for experimentID in experimentIDs:
ddG_connection.addPrediction(experimentID, PredictionSet, ProtocolID, KeepHETATMLines, StoreOutput = True)
count += 1
if count >= 10:
colortext.write(".")
colortext.flush()
count = 0
print("")
@staticmethod
def testPublications():
ddG_connection = db.ddG()
FilterTester.openDB()
pr = PredictionResultSet(ddGdb, SQL = "WHERE ID >= 28331 and ID <= 28431")
print(1)
er = ExperimentResultSet(ddGdb, SQL = "WHERE ID >= 110906 and ID <= 111006")
print(2)
print(len(pr))
print(len(er))
ddG_connection.getPublications(pr)
ddG_connection.getPublications(er)
if False:
import analysis
analyzer = analysis.Analyzer("AllExperimentsProtocol16")
analyzer.AddPublishedDDGsToAnalysisTables()
analyzer.plot(analysis.Analyzer.correlation_coefficient, "Kellogg.rr", table_names = ["Kellogg"])
# "kellogg.txt", "Kellogg")
for table_name, a_table in sorted(analyzer.analysis_tables.iteritems()):
print(a_table)
print(table_name)
#print(analysis.AnalysisPoint.headers)
#print(analysis_tables)
#print(analysis.AnalysisPoint.headers)
#print(analysis_tables["Kellogg"])
#ddG_connection.createPredictionsFromUserDataSet("AllValidPGPK", "AllExperimentsProtocol16", "Kellogg:10.1002/prot.22921:protocol16:32231", False, StoreOutput = False, Description = {}, InputFiles = {}, testonly = False)
#ddG_connection = db.ddG()
#ddG_connection.addPDBtoDatabase(pdbID = "1FKJ")
ddG_connection = db.ddG()
if __name__ == '__main__':
#help()
# Tested functions
tests = [
FilterTester.unionFilterExample1,
FilterTester.unionFilterExample2,
FilterTester.allStructures,
FilterTester.getStructuresWithNullResolutionSQL,
FilterTester.getStructuresWithNullResolutionFilter,
FilterTester.pickSpecific,
FilterTester.getStructuresInResolutionRange,
FilterTester.getStructuresWithUniProtIDs,
FilterTester.getStructuresFilteredByStructures,
FilterTester.getExperimentsWithSQL,
FilterTester.getExperimentsFilteredByStructures,
FilterTester.getExperimentsFilteredByMutationSize,
FilterTester.getExperimentsFilteredByMutationSize_faster,
FilterTester.getExperimentsFilteredByAminoAcids1,
FilterTester.getExperimentsFilteredByAminoAcids2,
FilterTester.getExperimentsFilteredByResolution,
FilterTester.getAllPredictions,
FilterTester.getPredictionsWithSQL,
FilterTester.getPredictionsUsingMultipleFilters,
FilterTester.getPredictionsUsingMultipleFilters_Speed,
FilterTester.showResultSetOperations,
]
tests = [FilterTester.getExperimentsFilteredByMutationSize]
# BROKEN FUNCTIONS
#FilterTester.getExperimentsFilteredBySource() # Needs update w.r.t. new database schema
#FilterTester.getExperimentsFilteredBySourceAndResolution() # Needs update w.r.t. new database schema
#FilterTester.showAllEligibleProTherm("test", "test", False) # Needs update w.r.t. new database schema
#FilterTester.testPublications # Needs update w.r.t. new database schema
do_profiling = False
import gc
gc.disable()
if do_profiling:
FilterTester.profile('FilterTester.getExperimentsFilteredByMutationSize_faster()')
else:
for t in tests:
t1 = time.time()
t()
print("** Total time taken in getExperimentsFilteredByMutationSize_faster: %0.2f **" % (time.time() - t1))
gc.enable()
|
|
"""
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, SelectInfo
from django.db.models.sql.datastructures import Date, DateTime
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint
from django.utils import six
from django.utils import timezone
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'DateTimeQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
self.get_compiler(using).execute_sql(None)
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
where.add((Constraint(None, field.column, field), 'in',
pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND)
self.do_query(self.get_meta().db_table, where, using=using)
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
this is done by copying the query.query.where to self.query, for
complex queries by using subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if ((not innerq_used_tables or innerq_used_tables == self.tables)
and not len(innerq.having)):
# There is only the base table in use in the query, and there is
# no aggregate filtering going on.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return
self.delete_batch(values, using)
return
else:
innerq.clear_select_clause()
innerq.select = [
SelectInfo((self.get_initial_alias(), pk.column), None)
]
values = innerq
where = self.where_class()
where.add((Constraint(None, pk.column, pk), 'in', values), AND)
self.where = where
self.get_compiler(using).execute_sql(None)
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
pk_field = self.get_meta().pk
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.where.add((Constraint(None, pk_field.column, pk_field), 'in',
pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]),
AND)
self.get_compiler(using).execute_sql(None)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field, model, direct, m2m = self.get_meta().get_field_by_name(name)
if not direct or m2m:
raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
if model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
self.values.extend(values_seq)
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class DateQuery(Query):
"""
A DateQuery is a normal query, except that it specifically selects a single
date field. This requires some special handling when converting the results
back to Python objects, so we put it in a separate class.
"""
compiler = 'SQLDateCompiler'
def add_select(self, field_name, lookup_type, order='ASC'):
"""
Converts the query into an extraction query.
"""
try:
result = self.setup_joins(
field_name.split(LOOKUP_SEP),
self.get_meta(),
self.get_initial_alias(),
)
except FieldError:
raise FieldDoesNotExist("%s has no field named '%s'" % (
self.get_meta().object_name, field_name
))
field = result[0]
self._check_field(field) # overridden in DateTimeQuery
alias = result[3][-1]
select = self._get_select((alias, field.column), lookup_type)
self.clear_select_clause()
self.select = [SelectInfo(select, None)]
self.distinct = True
self.order_by = [1] if order == 'ASC' else [-1]
if field.null:
self.add_filter(("%s__isnull" % field_name, False))
def _check_field(self, field):
assert isinstance(field, DateField), \
"%r isn't a DateField." % field.name
if settings.USE_TZ:
assert not isinstance(field, DateTimeField), \
"%r is a DateTimeField, not a DateField." % field.name
def _get_select(self, col, lookup_type):
return Date(col, lookup_type)
class DateTimeQuery(DateQuery):
"""
A DateTimeQuery is like a DateQuery but for a datetime field. If time zone
support is active, the tzinfo attribute contains the time zone to use for
converting the values before truncating them. Otherwise it's set to None.
"""
compiler = 'SQLDateTimeCompiler'
def clone(self, klass=None, memo=None, **kwargs):
if 'tzinfo' not in kwargs and hasattr(self, 'tzinfo'):
kwargs['tzinfo'] = self.tzinfo
return super(DateTimeQuery, self).clone(klass, memo, **kwargs)
def _check_field(self, field):
assert isinstance(field, DateTimeField), \
"%r isn't a DateTimeField." % field.name
def _get_select(self, col, lookup_type):
if self.tzinfo is None:
tzname = None
else:
tzname = timezone._get_timezone_name(self.tzinfo)
return DateTime(col, lookup_type, tzname)
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
return curr_balance
def setup_chain(self,bitcoinConfDict=None, wallets=None):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 100)
assert_equal(self.nodes[2].getbalance("from1"), 100-21)
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('90'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
#tx should be added to balance because after restarting the nodes tx should be broadcastet
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
#check if wallet or blochchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: assemble
short_description: Assemble configuration files from fragments
description:
- Assembles a configuration file from fragments.
- Often a particular program will take a single configuration file and does not support a
C(conf.d) style structure where it is easy to build up the configuration
from multiple sources. C(assemble) will take a directory of files that can be
local or have already been transferred to the system, and concatenate them
together to produce a destination file.
- Files are assembled in string sorting order.
- Puppet calls this idea I(fragments).
- This module is also supported for Windows targets.
notes:
- This module is also supported for Windows targets.
- See also M(copy) and M(template).
version_added: '0.5'
options:
src:
description:
- An already existing directory full of source files.
required: true
dest:
description:
- A file to create using the concatenation of all of the source files.
required: true
backup:
description:
- Create a backup file (if C(yes)), including the timestamp information so
you can get the original file back if you somehow clobbered it
incorrectly.
type: bool
default: no
delimiter:
description:
- A delimiter to separate the file contents.
version_added: '1.4'
remote_src:
description:
- If C(no), it will search for src at originating/master machine.
- If C(yes), it will go to the remote/target machine for the src.
type: bool
default: yes
version_added: '1.4'
regexp:
description:
- Assemble files only if C(regex) matches the filename.
- If not set, all files are assembled.
- Every "\" (backslash) must be escaped as "\\" to comply to YAML syntax.
- Uses L(Python regular expressions,http://docs.python.org/2/library/re.html).
ignore_hidden:
description:
- A boolean that controls if files that start with a '.' will be included or not.
type: bool
default: no
version_added: '2.0'
validate:
description:
- The validation command to run before copying into place.
- The path to the file to validate is passed in via '%s' which must be present as in the sshd example below.
- The command is passed securely so shell features like expansion and pipes won't work.
version_added: '2.0'
author:
- Stephen Fromm (@sfromm)
extends_documentation_fragment:
- files
- decrypt
'''
EXAMPLES = r'''
- name: Assemble from fragments from a directory
assemble:
src: /etc/someapp/fragments
dest: /etc/someapp/someapp.conf
- name: Inserted provided delimiter in between each fragment
assemble:
src: /etc/someapp/fragments
dest: /etc/someapp/someapp.conf
delimiter: '### START FRAGMENT ###'
- name: Assemble a new "sshd_config" file into place, after passing validation with sshd
assemble:
src: /etc/ssh/conf.d/
dest: /etc/ssh/sshd_config
validate: '/usr/sbin/sshd -t -f %s'
'''
import codecs
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b
from ansible.module_utils._text import to_native
def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = os.path.join(src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = open(fragment, 'rb').read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b('\n'))
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != b('\n'):
tmp.write(b('\n'))
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b('\n')):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
def cleanup(path, result=None):
# cleanup just in case
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError) as e:
# don't error on possible race conditions, but keep warning
if result is not None:
result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
src=dict(required=True, type='path'),
delimiter=dict(required=False),
dest=dict(required=True, type='path'),
backup=dict(default=False, type='bool'),
remote_src=dict(default=False, type='bool'),
regexp=dict(required=False),
ignore_hidden=dict(default=False, type='bool'),
validate=dict(required=False, type='str'),
),
add_file_common_args=True,
)
changed = False
path_hash = None
dest_hash = None
src = module.params['src']
dest = module.params['dest']
backup = module.params['backup']
delimiter = module.params['delimiter']
regexp = module.params['regexp']
compiled_regexp = None
ignore_hidden = module.params['ignore_hidden']
validate = module.params.get('validate', None)
result = dict(src=src, dest=dest)
if not os.path.exists(src):
module.fail_json(msg="Source (%s) does not exist" % src)
if not os.path.isdir(src):
module.fail_json(msg="Source (%s) is not a directory" % src)
if regexp is not None:
try:
compiled_regexp = re.compile(regexp)
except re.error as e:
module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
if validate and "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % validate)
path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
path_hash = module.sha1(path)
result['checksum'] = path_hash
# Backwards compat. This won't return data if FIPS mode is active
try:
pathmd5 = module.md5(path)
except ValueError:
pathmd5 = None
result['md5sum'] = pathmd5
if os.path.exists(dest):
dest_hash = module.sha1(dest)
if path_hash != dest_hash:
if validate:
(rc, out, err) = module.run_command(validate % path)
result['validation'] = dict(rc=rc, stdout=out, stderr=err)
if rc != 0:
cleanup(path)
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
if backup and dest_hash is not None:
result['backup_file'] = module.backup_local(dest)
module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
changed = True
cleanup(path, result)
# handle file permissions
file_args = module.load_file_common_arguments(module.params)
result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete
result['msg'] = "OK"
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
################################################################################
# ____ __ __ _ _____ _ _ #
# / __ \ \ \ / / | | / ____| | | | #
# | | | |_ __ ___ _ __ \ /\ / /__| |__ | | __| | ___ | |__ ___ #
# | | | | '_ \ / _ \ '_ \ \/ \/ / _ \ '_ \| | |_ | |/ _ \| '_ \ / _ \ #
# | |__| | |_) | __/ | | \ /\ / __/ |_) | |__| | | (_) | |_) | __/ #
# \____/| .__/ \___|_| |_|\/ \/ \___|_.__/ \_____|_|\___/|_.__/ \___| #
# | | #
# |_| #
# #
# 3D Object Converter #
# Version 1.0.1 #
# #
# (c) 2010-2011 by #
# University of Applied Sciences Northwestern Switzerland #
# Institute of Geomatics Engineering #
# martin.christen@fhnw.ch #
################################################################################
# Licensed under MIT License. Read the file LICENSE for more information @
################################################################################
import urllib2
import sys
import os
import os.path
import tarfile
import re
import glob
if len(sys.argv) < 2:
print('usage:\n')
print('--source wavefront.obj')
print('--calccenter')
print('--flipxy')
print('--integer')
print('--flipxz')
print('\nexample: obj2json.py --source bla.obj --calccenter')
sys.exit()
filename = ""
bSource = 0
bCalccenter = 0
bFlipxy = 0
bInteger = 0
bFlipxz = 0
id = 1
lng = 0
lat = 0
elv = 0
texture = ""
for i in range(1,len(sys.argv)):
if not(sys.argv[i].startswith('--')):
if bSource == 1:
filename = sys.argv[i]
if sys.argv[i] == ('--source'):
bSource = 1
if sys.argv[i] == ('--calccenter'):
bCalccenter = 1
if sys.argv[i] == ('--flipxy'):
bFlipxy = 1
if sys.argv[i] == ('--integer'):
bInteger = 1
if sys.argv[i] == ('--flipxz'):
bFlipxz = 1
if (bSource == 0):
print('Error: please specify input file using --source parameter')
sys.exit()
if (bSource):
print('Source: ' + filename)
if (bCalccenter):
print('calculating centroid...')
if (bFlipxy):
print('flipping x and y!')
if (bFlipxz):
print('flipping x and z!')
color = ",1,0,0,1" #set color
f = open(filename, "r")
#read type p,pt,pnt etc. recognizable on face definition "1"->p "1/2"->pn "1/1/1"->pnt "1//1"->pt
wholefile = f.read();
wholefile = re.sub("[ ]+"," ",wholefile)
test = re.search("\nf \d*/\d*/\d*",wholefile) # 1/2/1 -> pnt
if test:
vertexsemantic = "pnt"
test1 = re.search("\nf \d*//\d*",wholefile) # 1//1 -> pn
if test1:
vertexsemantic = "pn"
print "conversion failed: vertexsemantic 'pn' is currently not supported"
quit()
test2 = re.search("\nf \d*/\d* ",wholefile) #1/2 ->pt
if test2:
vertexsemantic = "pt"
test3 = re.search("\nf \d* ",wholefile) # 1 -> p
if test3:
vertexsemantic = "p"
#vertexsemantic is now defined. ------------------------------------------------
print "vertexsemantic found: "+vertexsemantic
f.seek(0); #sets the file cursor back
#set v ,vn, vt arrays
lines = f.readlines();
v = [] #vert coordinates
vt = [] #texture coordinate
vn = [] #normal coordinates
face = []
vertices = []
p = []
pt = []
pnt = []
idx = []
ilb = []
ilb2 = []
bHasNormals = 0
if vertexsemantic == "pnt":
bHasNormals = 1
#for i in range(0,65000):
# ilb.append(" ") #interleaved buffer
cnt = 0
for line in lines:
if line[0:2] == "v ":
rline = re.sub(r"\s+",",",line[1:-1])
v.append(rline[1:])
elif line[0:3] == "vn ":
rline = re.sub(r"\s+",",",line[1:-1])
vn.append(rline[2:])
elif line[0:3] == "vt ":
rline = re.sub(r"\s+",",",line[1:-1])
vt.append(rline[2:])
elif line[0:2] == "f ": #face definition
vertices = line[2:-1].split() #splits every space
for vert in vertices:
if vertexsemantic == "p":
ilb.append(v[int(vert)-1]+color)
idx.append(cnt)
cnt = cnt + 1
elif vertexsemantic == "pt": #this means f 1/2
a = vert.split('/')
ilb.append(v[int(a[0])-1]+","+(vt[int(a[1])-1]))
idx.append(cnt)
cnt = cnt + 1
elif vertexsemantic == "pnt": #this means f 1/2/2 (note in wavefront it is: p/t/n and not pnt!)
a = vert.split('/')
ilb.append((v[int(a[0])-1]+","+(vn[int(a[2])-1])+","+(vt[int(a[1])-1])))
idx.append(cnt)
cnt = cnt + 1
f.close();
cx = 0;
cy = 0;
cz = 0;
numelems = len(v);
part = 1.0 / float(numelems)
print ('Number of elements: ' + str(numelems))
for c in v:
tokens = c.split(',')
x = float(tokens[0])
y = float(tokens[1])
z = float(tokens[2])
cx = cx + x * part;
cy = cy + y * part;
cz = cz + z * part;
if bInteger:
cx = int(cx)
cy = int(cy)
cz = int(cz)
print ('Center = (' + str(cx) + ', ' + str(cy) + ', ' + str(cz))
lng = cx
lat = cy
elv = cz
#now recreate ilb
for ilbiterator in ilb:
tokens2 = ilbiterator.split(',')
if len(tokens2) > 2:
if bCalccenter:
newx = float(tokens2[0]) - cx
newy = float(tokens2[1]) - cy
newz = float(tokens2[2]) - cz
else:
newx = float(tokens2[0])
newy = float(tokens2[1])
newz = float(tokens2[2])
if bFlipxy:
s = str(newy) + ',' + str(-newz) + ',' + str(newx)
elif bFlipxz:
s = str(newz) + ',' + str(newy) + ',' + str(newx)
else:
s = str(newx) + ',' + str(newy) + ',' + str(newz)
if (bHasNormals):
normalx = float(tokens2[3])
normaly = float(tokens2[4])
normalz = float(tokens2[5])
if bFlipxy:
s = s + ',' + str(normaly) + ',' + str(-normalz) + ',' + str(normalx)
elif bFlipxz:
s = s + ',' + str(normalz) + ',' + str(normaly) + ',' + str(normalx)
else:
s = s + ',' + str(normalx) + ',' + str(normaly) + ',' + str(normalz)
for i in range(6,len(tokens2)):
s = s + ','
s = s + tokens2[i]
else:
for i in range(3,len(tokens2)):
s = s + ','
s = s + tokens2[i]
ilb2.append(s)
ilb = ilb2
ilb2 = []
k=0;
#write to json format
name = filename.split('.')
g = open(name[0]+'.json',"w")
g.write("[")
while len(ilb)>0:
print len(ilb)
g.write("[{\n\"id\" : \""+str(id)+"\",")
g.write("\n\"Center\" : ["+str(lng)+","+str(lat)+","+str(elv)+"],")
g.write("\n\"DiffuseMap\" : \""+str(texture)+"\",")
g.write("\n\"VertexSemantic\" : \""+vertexsemantic+"\",\n\"Vertices\" : [")
k=0
for x in ilb:
k=k+1
if(k>65000):
break
if(x != " "):
g.write("\t"+x+",\n")
g.write("\t\t\t\t")
for i in range(0,k):
ilb.pop(0)
g.seek(-7,1) #set cursor pos back to remove last ','
g.write("],\n\"IndexSemantic\" : \"TRIANGLES\",\n\"Indices\" : [\t")
i=0
for x in idx:
i+=1
if(i>k):
break
g.write(str(int(x))+",")
if i%3==0:
g.write("\n\t\t\t\t")
g.seek(-7,1) #set cursor pos back to remove last ','
g.write("]}],")
g.seek(-1,1) #set cursor pos back to remove last ','
g.write("\n\n]")
print "conversion successfully finished..."
|
|
import scipy
import numpy as np
from ...metaarray import MetaArray
def downsample(data, n, axis=0, xvals='subsample'):
"""Downsample by averaging points together across axis.
If multiple axes are specified, runs once per axis.
If a metaArray is given, then the axis values can be either subsampled
or downsampled to match.
"""
ma = None
if (hasattr(data, 'implements') and data.implements('MetaArray')):
ma = data
data = data.view(np.ndarray)
if hasattr(axis, '__len__'):
if not hasattr(n, '__len__'):
n = [n]*len(axis)
for i in range(len(axis)):
data = downsample(data, n[i], axis[i])
return data
nPts = int(data.shape[axis] / n)
s = list(data.shape)
s[axis] = nPts
s.insert(axis+1, n)
sl = [slice(None)] * data.ndim
sl[axis] = slice(0, nPts*n)
d1 = data[tuple(sl)]
#print d1.shape, s
d1.shape = tuple(s)
d2 = d1.mean(axis+1)
if ma is None:
return d2
else:
info = ma.infoCopy()
if 'values' in info[axis]:
if xvals == 'subsample':
info[axis]['values'] = info[axis]['values'][::n][:nPts]
elif xvals == 'downsample':
info[axis]['values'] = downsample(info[axis]['values'], n)
return MetaArray(d2, info=info)
def applyFilter(data, b, a, padding=100, bidir=True):
"""Apply a linear filter with coefficients a, b. Optionally pad the data before filtering
and/or run the filter in both directions."""
d1 = data.view(np.ndarray)
if padding > 0:
d1 = np.hstack([d1[:padding], d1, d1[-padding:]])
if bidir:
d1 = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, d1)[::-1])[::-1]
else:
d1 = scipy.signal.lfilter(b, a, d1)
if padding > 0:
d1 = d1[padding:-padding]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d1, info=data.infoCopy())
else:
return d1
def besselFilter(data, cutoff, order=1, dt=None, btype='low', bidir=True):
"""return data passed through bessel filter"""
if dt is None:
try:
tvals = data.xvals('Time')
dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
except:
dt = 1.0
b,a = scipy.signal.bessel(order, cutoff * dt, btype=btype)
return applyFilter(data, b, a, bidir=bidir)
#base = data.mean()
#d1 = scipy.signal.lfilter(b, a, data.view(ndarray)-base) + base
#if (hasattr(data, 'implements') and data.implements('MetaArray')):
#return MetaArray(d1, info=data.infoCopy())
#return d1
def butterworthFilter(data, wPass, wStop=None, gPass=2.0, gStop=20.0, order=1, dt=None, btype='low', bidir=True):
"""return data passed through bessel filter"""
if dt is None:
try:
tvals = data.xvals('Time')
dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
except:
dt = 1.0
if wStop is None:
wStop = wPass * 2.0
ord, Wn = scipy.signal.buttord(wPass*dt*2., wStop*dt*2., gPass, gStop)
#print "butterworth ord %f Wn %f c %f sc %f" % (ord, Wn, cutoff, stopCutoff)
b,a = scipy.signal.butter(ord, Wn, btype=btype)
return applyFilter(data, b, a, bidir=bidir)
def rollingSum(data, n):
d1 = data.copy()
d1[1:] += d1[:-1] # integrate
d2 = np.empty(len(d1) - n + 1, dtype=data.dtype)
d2[0] = d1[n-1] # copy first point
d2[1:] = d1[n:] - d1[:-n] # subtract
return d2
def mode(data, bins=None):
"""Returns location max value from histogram."""
if bins is None:
bins = int(len(data)/10.)
if bins < 2:
bins = 2
y, x = np.histogram(data, bins=bins)
ind = np.argmax(y)
mode = 0.5 * (x[ind] + x[ind+1])
return mode
def modeFilter(data, window=500, step=None, bins=None):
"""Filter based on histogram-based mode function"""
d1 = data.view(np.ndarray)
vals = []
l2 = int(window/2.)
if step is None:
step = l2
i = 0
while True:
if i > len(data)-step:
break
vals.append(mode(d1[i:i+window], bins))
i += step
chunks = [np.linspace(vals[0], vals[0], l2)]
for i in range(len(vals)-1):
chunks.append(np.linspace(vals[i], vals[i+1], step))
remain = len(data) - step*(len(vals)-1) - l2
chunks.append(np.linspace(vals[-1], vals[-1], remain))
d2 = np.hstack(chunks)
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d2, info=data.infoCopy())
return d2
def denoise(data, radius=2, threshold=4):
"""Very simple noise removal function. Compares a point to surrounding points,
replaces with nearby values if the difference is too large."""
r2 = radius * 2
d1 = data.view(np.ndarray)
d2 = d1[radius:] - d1[:-radius] #a derivative
#d3 = data[r2:] - data[:-r2]
#d4 = d2 - d3
stdev = d2.std()
#print "denoise: stdev of derivative:", stdev
mask1 = d2 > stdev*threshold #where derivative is large and positive
mask2 = d2 < -stdev*threshold #where derivative is large and negative
maskpos = mask1[:-radius] * mask2[radius:] #both need to be true
maskneg = mask1[radius:] * mask2[:-radius]
mask = maskpos + maskneg
d5 = np.where(mask, d1[:-r2], d1[radius:-radius]) #where both are true replace the value with the value from 2 points before
d6 = np.empty(d1.shape, dtype=d1.dtype) #add points back to the ends
d6[radius:-radius] = d5
d6[:radius] = d1[:radius]
d6[-radius:] = d1[-radius:]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d6, info=data.infoCopy())
return d6
def adaptiveDetrend(data, x=None, threshold=3.0):
"""Return the signal with baseline removed. Discards outliers from baseline measurement."""
if x is None:
x = data.xvals(0)
d = data.view(np.ndarray)
d2 = scipy.signal.detrend(d)
stdev = d2.std()
mask = abs(d2) < stdev*threshold
#d3 = where(mask, 0, d2)
#d4 = d2 - lowPass(d3, cutoffs[1], dt=dt)
lr = scipy.stats.linregress(x[mask], d[mask])
base = lr[1] + lr[0]*x
d4 = d - base
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d4, info=data.infoCopy())
return d4
def histogramDetrend(data, window=500, bins=50, threshold=3.0, offsetOnly=False):
"""Linear detrend. Works by finding the most common value at the beginning and end of a trace, excluding outliers.
If offsetOnly is True, then only the offset from the beginning of the trace is subtracted.
"""
d1 = data.view(np.ndarray)
d2 = [d1[:window], d1[-window:]]
v = [0, 0]
for i in [0, 1]:
d3 = d2[i]
stdev = d3.std()
mask = abs(d3-np.median(d3)) < stdev*threshold
d4 = d3[mask]
y, x = np.histogram(d4, bins=bins)
ind = np.argmax(y)
v[i] = 0.5 * (x[ind] + x[ind+1])
if offsetOnly:
d3 = data.view(np.ndarray) - v[0]
else:
base = np.linspace(v[0], v[1], len(data))
d3 = data.view(np.ndarray) - base
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d3, info=data.infoCopy())
return d3
def concatenateColumns(data):
"""Returns a single record array with columns taken from the elements in data.
data should be a list of elements, which can be either record arrays or tuples (name, type, data)
"""
## first determine dtype
dtype = []
names = set()
maxLen = 0
for element in data:
if isinstance(element, np.ndarray):
## use existing columns
for i in range(len(element.dtype)):
name = element.dtype.names[i]
dtype.append((name, element.dtype[i]))
maxLen = max(maxLen, len(element))
else:
name, type, d = element
if type is None:
type = suggestDType(d)
dtype.append((name, type))
if isinstance(d, list) or isinstance(d, np.ndarray):
maxLen = max(maxLen, len(d))
if name in names:
raise Exception('Name "%s" repeated' % name)
names.add(name)
## create empty array
out = np.empty(maxLen, dtype)
## fill columns
for element in data:
if isinstance(element, np.ndarray):
for i in range(len(element.dtype)):
name = element.dtype.names[i]
try:
out[name] = element[name]
except:
print("Column:", name)
print("Input shape:", element.shape, element.dtype)
print("Output shape:", out.shape, out.dtype)
raise
else:
name, type, d = element
out[name] = d
return out
def suggestDType(x):
"""Return a suitable dtype for x"""
if isinstance(x, list) or isinstance(x, tuple):
if len(x) == 0:
raise Exception('can not determine dtype for empty list')
x = x[0]
if hasattr(x, 'dtype'):
return x.dtype
elif isinstance(x, float):
return float
elif isinstance(x, int):
return int
#elif isinstance(x, basestring): ## don't try to guess correct string length; use object instead.
#return '<U%d' % len(x)
else:
return object
def removePeriodic(data, f0=60.0, dt=None, harmonics=10, samples=4):
if (hasattr(data, 'implements') and data.implements('MetaArray')):
data1 = data.asarray()
if dt is None:
times = data.xvals('Time')
dt = times[1]-times[0]
else:
data1 = data
if dt is None:
raise Exception('Must specify dt for this data')
ft = np.fft.fft(data1)
## determine frequencies in fft data
df = 1.0 / (len(data1) * dt)
freqs = np.linspace(0.0, (len(ft)-1) * df, len(ft))
## flatten spikes at f0 and harmonics
for i in xrange(1, harmonics + 2):
f = f0 * i # target frequency
## determine index range to check for this frequency
ind1 = int(np.floor(f / df))
ind2 = int(np.ceil(f / df)) + (samples-1)
if ind1 > len(ft)/2.:
break
mag = (abs(ft[ind1-1]) + abs(ft[ind2+1])) * 0.5
for j in range(ind1, ind2+1):
phase = np.angle(ft[j]) ## Must preserve the phase of each point, otherwise any transients in the trace might lead to large artifacts.
re = mag * np.cos(phase)
im = mag * np.sin(phase)
ft[j] = re + im*1j
ft[len(ft)-j] = re - im*1j
data2 = np.fft.ifft(ft).real
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return metaarray.MetaArray(data2, info=data.infoCopy())
else:
return data2
|
|
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = [] # unused
opmap = {}
opname = ['<%r>' % (op,) for op in range(256)]
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP_START', 81)
def_op('WITH_CLEANUP_FINISH', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
name_op('STORE_ANNOTATION', 127) # Index in name list
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args
def_op('MAKE_FUNCTION', 132) # Flags
def_op('BUILD_SLICE', 133) # Number of items
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_KW', 141) # #args + #kwargs
def_op('CALL_FUNCTION_EX', 142) # Flags
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
def_op('BUILD_LIST_UNPACK', 149)
def_op('BUILD_MAP_UNPACK', 150)
def_op('BUILD_MAP_UNPACK_WITH_CALL', 151)
def_op('BUILD_TUPLE_UNPACK', 152)
def_op('BUILD_SET_UNPACK', 153)
jrel_op('SETUP_ASYNC_WITH', 154)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
def_op('BUILD_TUPLE_UNPACK_WITH_CALL', 158)
del def_op, name_op, jrel_op, jabs_op
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import re
import signal
import warnings
from datetime import datetime
from functools import reduce
from itertools import filterfalse, tee
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
MutableMapping,
Optional,
Tuple,
TypeVar,
)
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.utils.module_loading import import_string
if TYPE_CHECKING:
import jinja2
from airflow.models import TaskInstance
KEY_REGEX = re.compile(r'^[\w.-]+$')
GROUP_KEY_REGEX = re.compile(r'^[\w-]+$')
CAMELCASE_TO_SNAKE_CASE_REGEX = re.compile(r'(?!^)([A-Z]+)')
T = TypeVar('T')
S = TypeVar('S')
def validate_key(k: str, max_length: int = 250):
"""Validates value used as a key."""
if not isinstance(k, str):
raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
if len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
if not KEY_REGEX.match(k):
raise AirflowException(
f"The key ({k}) has to be made of alphanumeric characters, dashes, "
f"dots and underscores exclusively"
)
def validate_group_key(k: str, max_length: int = 200):
"""Validates value used as a group key."""
if not isinstance(k, str):
raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
if len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
if not GROUP_KEY_REGEX.match(k):
raise AirflowException(
f"The key ({k!r}) has to be made of alphanumeric characters, dashes and underscores exclusively"
)
def alchemy_to_dict(obj: Any) -> Optional[Dict]:
"""Transforms a SQLAlchemy model instance into a dictionary"""
if not obj:
return None
output = {}
for col in obj.__table__.columns:
value = getattr(obj, col.name)
if isinstance(value, datetime):
value = value.isoformat()
output[col.name] = value
return output
def ask_yesno(question: str, default: Optional[bool] = None) -> bool:
"""Helper to get a yes or no answer from the user."""
yes = {'yes', 'y'}
no = {'no', 'n'}
print(question)
while True:
choice = input().lower()
if choice == "" and default is not None:
return default
if choice in yes:
return True
if choice in no:
return False
print("Please respond with y/yes or n/no.")
def prompt_with_timeout(question: str, timeout: int, default: Optional[bool] = None) -> bool:
"""Ask the user a question and timeout if they don't respond"""
def handler(signum, frame):
raise AirflowException(f"Timeout {timeout}s reached")
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
try:
return ask_yesno(question, default)
finally:
signal.alarm(0)
def is_container(obj: Any) -> bool:
"""Test if an object is a container (iterable) but not a string"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def as_tuple(obj: Any) -> tuple:
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def chunks(items: List[T], chunk_size: int) -> Generator[List[T], None, None]:
"""Yield successive chunks of a given size from a list of items"""
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i : i + chunk_size]
def reduce_in_chunks(fn: Callable[[S, List[T]], S], iterable: List[T], initializer: S, chunk_size: int = 0):
"""
Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer
"""
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer)
def as_flattened_list(iterable: Iterable[Iterable[T]]) -> List[T]:
"""
Return an iterable with one level flattened
>>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink')))
['blue', 'red', 'green', 'yellow', 'pink']
"""
return [e for i in iterable for e in i]
def parse_template_string(template_string: str) -> Tuple[Optional[str], Optional["jinja2.Template"]]:
"""Parses Jinja template string."""
import jinja2
if "{{" in template_string: # jinja mode
return None, jinja2.Template(template_string)
else:
return template_string, None
def render_log_filename(ti: "TaskInstance", try_number, filename_template) -> str:
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
"""
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return render_template_to_string(filename_jinja_template, jinja_context)
return filename_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number,
)
def convert_camel_to_snake(camel_str: str) -> str:
"""Converts CamelCase to snake_case."""
return CAMELCASE_TO_SNAKE_CASE_REGEX.sub(r'_\1', camel_str).lower()
def merge_dicts(dict1: Dict, dict2: Dict) -> Dict:
"""
Merge two dicts recursively, returning new dict (input dict is not mutated).
Lists are not concatenated. Items in dict2 overwrite those also found in dict1.
"""
merged = dict1.copy()
for k, v in dict2.items():
if k in merged and isinstance(v, dict):
merged[k] = merge_dicts(merged.get(k, {}), v)
else:
merged[k] = v
return merged
def partition(pred: Callable[[T], bool], iterable: Iterable[T]) -> Tuple[Iterable[T], Iterable[T]]:
"""Use a predicate to partition entries into false entries and true entries"""
iter_1, iter_2 = tee(iterable)
return filterfalse(pred, iter_1), filter(pred, iter_2)
def chain(*args, **kwargs):
"""This function is deprecated. Please use `airflow.models.baseoperator.chain`."""
warnings.warn(
"This function is deprecated. Please use `airflow.models.baseoperator.chain`.",
DeprecationWarning,
stacklevel=2,
)
return import_string('airflow.models.baseoperator.chain')(*args, **kwargs)
def cross_downstream(*args, **kwargs):
"""This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`."""
warnings.warn(
"This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`.",
DeprecationWarning,
stacklevel=2,
)
return import_string('airflow.models.baseoperator.cross_downstream')(*args, **kwargs)
def build_airflow_url_with_query(query: Dict[str, Any]) -> str:
"""
Build airflow url using base_url and default_view and provided query
For example:
'http://0.0.0.0:8000/base/graph?dag_id=my-task&root=&execution_date=2020-10-27T10%3A59%3A25.615587
"""
import flask
view = conf.get('webserver', 'dag_default_view').lower()
return flask.url_for(f"Airflow.{view}", **query)
# The 'template' argument is typed as Any because the jinja2.Template is too
# dynamic to be effectively type-checked.
def render_template(template: Any, context: MutableMapping[str, Any], *, native: bool) -> Any:
"""Render a Jinja2 template with given Airflow context.
The default implementation of ``jinja2.Template.render()`` converts the
input context into dict eagerly many times, which triggers deprecation
messages in our custom context class. This takes the implementation apart
and retain the context mapping without resolving instead.
:param template: A Jinja2 template to render.
:param context: The Airflow task context to render the template with.
:param native: If set to *True*, render the template into a native type. A
DAG can enable this with ``render_template_as_native_obj=True``.
:returns: The render result.
"""
context = copy.copy(context)
env = template.environment
if template.globals:
context.update((k, v) for k, v in template.globals.items() if k not in context)
try:
nodes = template.root_render_func(env.context_class(env, context, template.name, template.blocks))
except Exception:
env.handle_exception() # Rewrite traceback to point to the template.
if native:
import jinja2.nativetypes
return jinja2.nativetypes.native_concat(nodes)
return "".join(nodes)
def render_template_to_string(template: "jinja2.Template", context: MutableMapping[str, Any]) -> str:
"""Shorthand to ``render_template(native=False)`` with better typing support."""
return render_template(template, context, native=False)
def render_template_as_native(template: "jinja2.Template", context: MutableMapping[str, Any]) -> Any:
"""Shorthand to ``render_template(native=True)`` with better typing support."""
return render_template(template, context, native=True)
def exactly_one(*args) -> bool:
"""
Returns True if exactly one of *args is "truthy", and False otherwise.
If user supplies an iterable, we raise ValueError and force them to unpack.
"""
if is_container(args[0]):
raise ValueError(
"Not supported for iterable args. Use `*` to unpack your iterable in the function call."
)
return sum(map(bool, args)) == 1
def prune_dict(val: Any, mode='strict'):
"""
Given dict ``val``, returns new dict based on ``val`` with all
empty elements removed.
What constitutes "empty" is controlled by the ``mode`` parameter. If mode is 'strict'
then only ``None`` elements will be removed. If mode is ``truthy``, then element ``x``
will be removed if ``bool(x) is False``.
"""
def is_empty(x):
if mode == 'strict':
return x is None
elif mode == 'truthy':
return bool(x) is False
raise ValueError("allowable values for `mode` include 'truthy' and 'strict'")
if isinstance(val, dict):
new_dict = {}
for k, v in val.items():
if is_empty(v):
continue
elif isinstance(v, (list, dict)):
new_val = prune_dict(v, mode=mode)
if new_val:
new_dict[k] = new_val
else:
new_dict[k] = v
return new_dict
elif isinstance(val, list):
new_list = []
for v in val:
if is_empty(v):
continue
elif isinstance(v, (list, dict)):
new_val = prune_dict(v, mode=mode)
if new_val:
new_list.append(new_val)
else:
new_list.append(v)
return new_list
else:
return val
|
|
#!/usr/bin/env python
# encoding: utf-8
from ast import literal_eval
import codecs
import io
import pytest
import networkx as nx
from networkx.readwrite.gml import literal_stringizer, literal_destringizer
import os
import tempfile
try:
unicode
except NameError:
unicode = str
try:
unichr
except NameError:
unichr = chr
class TestGraph(object):
@classmethod
def setup_class(cls):
cls.simple_data = """Creator "me"
Version "xx"
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1
label "Edge from node 3 to node 1"
]
]
"""
def test_parse_gml_cytoscape_bug(self):
# example from issue #321, originally #324 in trac
cytoscape_example = """
Creator "Cytoscape"
Version 1.0
graph [
node [
root_index -3
id -3
graphics [
x -96.0
y -67.0
w 40.0
h 40.0
fill "#ff9999"
type "ellipse"
outline "#666666"
outline_width 1.5
]
label "node2"
]
node [
root_index -2
id -2
graphics [
x 63.0
y 37.0
w 40.0
h 40.0
fill "#ff9999"
type "ellipse"
outline "#666666"
outline_width 1.5
]
label "node1"
]
node [
root_index -1
id -1
graphics [
x -31.0
y -17.0
w 40.0
h 40.0
fill "#ff9999"
type "ellipse"
outline "#666666"
outline_width 1.5
]
label "node0"
]
edge [
root_index -2
target -2
source -1
graphics [
width 1.5
fill "#0000ff"
type "line"
Line [
]
source_arrow 0
target_arrow 3
]
label "DirectedEdge"
]
edge [
root_index -1
target -1
source -3
graphics [
width 1.5
fill "#0000ff"
type "line"
Line [
]
source_arrow 0
target_arrow 3
]
label "DirectedEdge"
]
]
"""
nx.parse_gml(cytoscape_example)
def test_parse_gml(self):
G = nx.parse_gml(self.simple_data, label='label')
assert (sorted(G.nodes()) ==
['Node 1', 'Node 2', 'Node 3'])
assert ([e for e in sorted(G.edges())] ==
[('Node 1', 'Node 2'),
('Node 2', 'Node 3'),
('Node 3', 'Node 1')])
assert ([e for e in sorted(G.edges(data=True))] ==
[('Node 1', 'Node 2',
{'color': {'line': 'blue', 'thickness': 3},
'label': 'Edge from node 1 to node 2'}),
('Node 2', 'Node 3',
{'label': 'Edge from node 2 to node 3'}),
('Node 3', 'Node 1',
{'label': 'Edge from node 3 to node 1'})])
def test_read_gml(self):
(fd, fname) = tempfile.mkstemp()
fh = open(fname, 'w')
fh.write(self.simple_data)
fh.close()
Gin = nx.read_gml(fname, label='label')
G = nx.parse_gml(self.simple_data, label='label')
assert sorted(G.nodes(data=True)) == sorted(Gin.nodes(data=True))
assert sorted(G.edges(data=True)) == sorted(Gin.edges(data=True))
os.close(fd)
os.unlink(fname)
def test_labels_are_strings(self):
# GML requires labels to be strings (i.e., in quotes)
answer = """graph [
node [
id 0
label "1203"
]
]"""
G = nx.Graph()
G.add_node(1203)
data = '\n'.join(nx.generate_gml(G, stringizer=literal_stringizer))
assert data == answer
def test_relabel_duplicate(self):
data = """
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
fh = io.BytesIO(data.encode('UTF-8'))
fh.seek(0)
pytest.raises(
nx.NetworkXError, nx.read_gml, fh, label='label')
def test_tuplelabels(self):
# https://github.com/networkx/networkx/pull/1048
# Writing tuple labels to GML failed.
G = nx.OrderedGraph()
G.add_edge((0, 1), (1, 0))
data = '\n'.join(nx.generate_gml(G, stringizer=literal_stringizer))
answer = """graph [
node [
id 0
label "(0,1)"
]
node [
id 1
label "(1,0)"
]
edge [
source 0
target 1
]
]"""
assert data == answer
def test_quotes(self):
# https://github.com/networkx/networkx/issues/1061
# Encoding quotes as HTML entities.
G = nx.path_graph(1)
G.name = "path_graph(1)"
attr = 'This is "quoted" and this is a copyright: ' + unichr(169)
G.nodes[0]['demo'] = attr
fobj = tempfile.NamedTemporaryFile()
nx.write_gml(G, fobj)
fobj.seek(0)
# Should be bytes in 2.x and 3.x
data = fobj.read().strip().decode('ascii')
answer = """graph [
name "path_graph(1)"
node [
id 0
label "0"
demo "This is "quoted" and this is a copyright: ©"
]
]"""
assert data == answer
def test_unicode_node(self):
node = 'node' + unichr(169)
G = nx.Graph()
G.add_node(node)
fobj = tempfile.NamedTemporaryFile()
nx.write_gml(G, fobj)
fobj.seek(0)
# Should be bytes in 2.x and 3.x
data = fobj.read().strip().decode('ascii')
answer = """graph [
node [
id 0
label "node©"
]
]"""
assert data == answer
def test_float_label(self):
node = 1.0
G = nx.Graph()
G.add_node(node)
fobj = tempfile.NamedTemporaryFile()
nx.write_gml(G, fobj)
fobj.seek(0)
# Should be bytes in 2.x and 3.x
data = fobj.read().strip().decode('ascii')
answer = """graph [
node [
id 0
label "1.0"
]
]"""
assert data == answer
def test_name(self):
G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]')
assert 'x' == G.graph['name']
G = nx.parse_gml('graph [ node [ id 0 label "x" ] ]')
assert '' == G.name
assert 'name' not in G.graph
def test_graph_types(self):
for directed in [None, False, True]:
for multigraph in [None, False, True]:
gml = 'graph ['
if directed is not None:
gml += ' directed ' + str(int(directed))
if multigraph is not None:
gml += ' multigraph ' + str(int(multigraph))
gml += ' node [ id 0 label "0" ]'
gml += ' edge [ source 0 target 0 ]'
gml += ' ]'
G = nx.parse_gml(gml)
assert bool(directed) == G.is_directed()
assert bool(multigraph) == G.is_multigraph()
gml = 'graph [\n'
if directed is True:
gml += ' directed 1\n'
if multigraph is True:
gml += ' multigraph 1\n'
gml += """ node [
id 0
label "0"
]
edge [
source 0
target 0
"""
if multigraph:
gml += ' key 0\n'
gml += ' ]\n]'
assert gml == '\n'.join(nx.generate_gml(G))
def test_data_types(self):
data = [True, False, 10 ** 20, -2e33, "'", '"&&&""',
[{(b'\xfd',): '\x7f', unichr(0x4444): (1, 2)}, (2, "3")]]
try: # fails under IronPython
data.append(unichr(0x14444))
except ValueError:
data.append(unichr(0x1444))
try: # fails under Python 2.7
data.append(literal_eval('{2.3j, 1 - 2.3j, ()}'))
except ValueError:
data.append([2.3j, 1 - 2.3j, ()])
G = nx.Graph()
G.name = data
G.graph['data'] = data
G.add_node(0, int=-1, data=dict(data=data))
G.add_edge(0, 0, float=-2.5, data=data)
gml = '\n'.join(nx.generate_gml(G, stringizer=literal_stringizer))
G = nx.parse_gml(gml, destringizer=literal_destringizer)
assert data == G.name
assert {'name': data, unicode('data'): data} == G.graph
assert (list(G.nodes(data=True)) ==
[(0, dict(int=-1, data=dict(data=data)))])
assert (list(G.edges(data=True)) ==
[(0, 0, dict(float=-2.5, data=data))])
G = nx.Graph()
G.graph['data'] = 'frozenset([1, 2, 3])'
G = nx.parse_gml(nx.generate_gml(G), destringizer=literal_eval)
assert G.graph['data'] == 'frozenset([1, 2, 3])'
def test_escape_unescape(self):
gml = """graph [
name "&"䑄��&unknown;"
]"""
G = nx.parse_gml(gml)
assert (
'&"\x0f' + unichr(0x4444) +
'��&unknown;' ==
G.name)
gml = '\n'.join(nx.generate_gml(G))
alnu = "#1234567890;&#x1234567890abcdef"
answer = """graph [
name "&"䑄&""" + alnu + """;&unknown;"
]"""
assert answer == gml
def test_exceptions(self):
pytest.raises(ValueError, literal_destringizer, '(')
pytest.raises(ValueError, literal_destringizer, 'frozenset([1, 2, 3])')
pytest.raises(ValueError, literal_destringizer, literal_destringizer)
pytest.raises(ValueError, literal_stringizer, frozenset([1, 2, 3]))
pytest.raises(ValueError, literal_stringizer, literal_stringizer)
with tempfile.TemporaryFile() as f:
f.write(codecs.BOM_UTF8 + 'graph[]'.encode('ascii'))
f.seek(0)
pytest.raises(nx.NetworkXError, nx.read_gml, f)
def assert_parse_error(gml):
pytest.raises(nx.NetworkXError, nx.parse_gml, gml)
assert_parse_error(['graph [\n\n', unicode(']')])
assert_parse_error('')
assert_parse_error('Creator ""')
assert_parse_error('0')
assert_parse_error('graph ]')
assert_parse_error('graph [ 1 ]')
assert_parse_error('graph [ 1.E+2 ]')
assert_parse_error('graph [ "A" ]')
assert_parse_error('graph [ ] graph ]')
assert_parse_error('graph [ ] graph [ ]')
assert_parse_error('graph [ data [1, 2, 3] ]')
assert_parse_error('graph [ node [ ] ]')
assert_parse_error('graph [ node [ id 0 ] ]')
nx.parse_gml('graph [ node [ id "a" ] ]', label='id')
assert_parse_error(
'graph [ node [ id 0 label 0 ] node [ id 0 label 1 ] ]')
assert_parse_error(
'graph [ node [ id 0 label 0 ] node [ id 1 label 0 ] ]')
assert_parse_error('graph [ node [ id 0 label 0 ] edge [ ] ]')
assert_parse_error('graph [ node [ id 0 label 0 ] edge [ source 0 ] ]')
nx.parse_gml(
'graph [edge [ source 0 target 0 ] node [ id 0 label 0 ] ]')
assert_parse_error(
'graph [ node [ id 0 label 0 ] edge [ source 1 target 0 ] ]')
assert_parse_error(
'graph [ node [ id 0 label 0 ] edge [ source 0 target 1 ] ]')
assert_parse_error(
'graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] '
'edge [ source 0 target 1 ] edge [ source 1 target 0 ] ]')
nx.parse_gml(
'graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] '
'edge [ source 0 target 1 ] edge [ source 1 target 0 ] '
'directed 1 ]')
nx.parse_gml(
'graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] '
'edge [ source 0 target 1 ] edge [ source 0 target 1 ]'
'multigraph 1 ]')
nx.parse_gml(
'graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] '
'edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 ]'
'multigraph 1 ]')
assert_parse_error(
'graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] '
'edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 key 0 ]'
'multigraph 1 ]')
nx.parse_gml(
'graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] '
'edge [ source 0 target 1 key 0 ] edge [ source 1 target 0 key 0 ]'
'directed 1 multigraph 1 ]')
# Tests for string convertable alphanumeric id and label values
nx.parse_gml(
'graph [edge [ source a target a ] node [ id a label b ] ]')
nx.parse_gml(
'graph [ node [ id n42 label 0 ] node [ id x43 label 1 ]'
'edge [ source n42 target x43 key 0 ]'
'edge [ source x43 target n42 key 0 ]'
'directed 1 multigraph 1 ]')
assert_parse_error(
"graph [edge [ source u'u\4200' target u'u\4200' ] " +
"node [ id u'u\4200' label b ] ]")
def assert_generate_error(*args, **kwargs):
pytest.raises(nx.NetworkXError,
lambda: list(nx.generate_gml(*args, **kwargs)))
G = nx.Graph()
G.graph[3] = 3
assert_generate_error(G)
G = nx.Graph()
G.graph['3'] = 3
assert_generate_error(G)
G = nx.Graph()
G.graph['data'] = frozenset([1, 2, 3])
assert_generate_error(G, stringizer=literal_stringizer)
G = nx.Graph()
G.graph['data'] = []
assert_generate_error(G)
assert_generate_error(G, stringizer=len)
def test_label_kwarg(self):
G = nx.parse_gml(self.simple_data, label='id')
assert sorted(G.nodes) == [1, 2, 3]
labels = [G.nodes[n]['label'] for n in sorted(G.nodes)]
assert labels == ['Node 1', 'Node 2', 'Node 3']
G = nx.parse_gml(self.simple_data, label=None)
assert sorted(G.nodes) == [1, 2, 3]
labels = [G.nodes[n]['label'] for n in sorted(G.nodes)]
assert labels == ['Node 1', 'Node 2', 'Node 3']
def test_outofrange_integers(self):
# GML restricts integers to 32 signed bits.
# Check that we honor this restriction on export
G = nx.Graph()
# Test export for numbers that barely fit or don't fit into 32 bits,
# and 3 numbers in the middle
numbers = {'toosmall': (-2**31)-1,
'small': -2**31,
'med1': -4,
'med2': 0,
'med3': 17,
'big': (2**31)-1,
'toobig': 2**31}
G.add_node('Node', **numbers)
fd, fname = tempfile.mkstemp()
try:
nx.write_gml(G, fname)
# Check that the export wrote the nonfitting numbers as strings
G2 = nx.read_gml(fname)
for attr, value in G2.nodes['Node'].items():
if attr == 'toosmall' or attr == 'toobig':
assert type(value) == str
else:
assert type(value) == int
finally:
os.close(fd)
os.unlink(fname)
|
|
# This is free software; you can redistribute it and/or modify it under the
# terms of MIT free software license as published by the Massachusetts
# Institute of Technology.
#
# Copyright 2014. Pine Studio
import types
import re
from jinja2 import Template
human_template = """package {{package}}
{
public class {{class_name}} extends _{{class_name}}
{
public function {{class_name}}()
{
}
}
}
"""
class HumanClassWritter:
_file = None
_package = ""
_class_name = ""
_verbose = False
def __init__(self, file, package, class_name, verbose):
self._file = file
self._package = package
self._class_name = class_name
self._verbose = verbose
def close_file(self):
self._file.close()
def write_class(self):
template = Template(human_template, trim_blocks=True, lstrip_blocks=True)
render = template.render({"package": self._package, "class_name": self._class_name})
if (self._verbose):
print "------------------------------------------------------------"
print "Writing human class: " + self._class_name + "\n"
print render
self._file.write(render)
machine_template = """package {{package}}
{
import Modi.*;
{% for path in imports %}
import {{path}};
{% endfor %}
public class _{{class_name}} extends {{super_class_name}}
{
public static const ATTRIBUTES:Array = [{% for a in attributes %}"{{a.name}}",{% endfor %}];
public static const ATTRIBUTE_TYPES:Array = [{% for a in attributes %}"{{a.type}}",{% endfor %}];
{% for a in attributes %}
public static const ATTRIBUTE_{{a.uname}}:String = "{{a.name}}";
{% endfor %}
{% for a in attributes %}
{% if a.values_|length > 0 %}
{% for value in a.values_ %}
public static const {{a.uname}}_{{value.upper()}}:String = "{{value}}";
{% endfor %}
public static const {{a.uname}}_ENUM_ARRAY:Array = [{% for value in a.values_ %}{{a.uname}}_{{value.upper()}}, {% endfor %}];
{% endif %}
{% endfor %}
{% for a in attributes %}
private var _{{a.name}}:{{a.type}};
{% endfor %}
public function _{{class_name}}()
{
registerAttributes(ATTRIBUTES, ATTRIBUTE_TYPES);
{% for a in attributes %}
{% if a.default_value %}
_{{a.name}} = {{a.default_value}};
{% endif %}
{% if a.child_type %}
_{{a.name}}.childType = "{{a.child_type}}";
{% endif %}
{% endfor %}
}
{% for a in attributes %}
public final function set {{a.name}}(value:{{a.type}}):void { dispatchChangeEvent(ATTRIBUTE_{{a.uname}}, _{{a.name}}, _{{a.name}} = value); }
public final function set {{a.name}}DirectUnsafe(value:{{a.type}}):void { _{{a.name}} = value; }
public final function get {{a.name}}():{{a.type}} { return _{{a.name}}; }
{% endfor %}
}
}
"""
class MachineClassWritter:
_file = None
_package = ""
_class_name = ""
_class_data = ""
_verbose = False
def __init__(self, file, package, class_name, class_data, verbose):
self._file = file
self._package = package
self._class_name = class_name
self._class_data = class_data
self._verbose = verbose
def close_file(self):
self._file.close()
def write_class(self):
imports = []
if "imports" in self._class_data:
imports_data = self._class_data["imports"]
for import_path in imports_data:
imports.append(import_path)
super_class = "ManagedObject"
if "super" in self._class_data:
super_class = self._class_data["super"]
########################################################################
attributes = []
for name in self._class_data:
if is_reserved_word(name):
continue
uname = to_uppercase_with_underscores(name)
data = self._class_data[name]
type_ = data
values = []
default_value = None
child_type = None
if "Managed" in data:
type_ = get_modi_class(data)
if type_ == "ManagedObjectId":
default_value = "ManagedObjectId.UNDEFINED"
else:
default_value = "new " + type_ + "()"
if type_ == "ManagedArray":
element_type = get_managed_array_element_type(data)
if element_type != "ManagedObject":
if is_modi_class(element_type):
child_type = "Modi." + element_type
else:
# User can specify his own package.
if element_type.find(".") != -1:
child_type = element_type
# If no package is specified, package that was given as parameter to the script will be used.
else:
# If there is some package, dot must be appended before class name.
if self._package != "":
child_type = self._package + "." + element_type
else:
child_type = element_type
elif type(data) == dict:
if "type" in data:
type_ = data["type"]
else:
type_ = "String"
if "default" in data:
defaultValue = data["default"]
if "values" in data:
default_value = uname + "_" + defaultValue.upper()
elif "type" in data:
if data["type"] == "String":
default_value = "\"" + str(defaultValue) + "\""
elif data["type"] == "ManagedValue":
default_value = "new ManagedValue(" + str(defaultValue) + ")"
elif data["type"] == "Boolean":
default_value = str(defaultValue).lower()
else:
default_value = str(defaultValue)
if "values" in data:
for value in data["values"]:
values.append(value)
elif data == "Array":
# Array value.
default_value = "[]"
hash = {"name": name, "uname": uname, "type": type_, "values_": values}
if default_value is not None:
hash["default_value"] = default_value
if child_type is not None:
hash["child_type"] = child_type
attributes.append(hash)
########################################################################
template = Template(machine_template, trim_blocks=True, lstrip_blocks=True)
render = template.render({
"package": self._package, "imports": imports,
"class_name": self._class_name, "super_class_name": super_class,
"attributes": attributes})
if self._verbose:
print "------------------------------------------------------------"
print "Writing machine class: _" + self._class_name + "\n"
print render
self._file.write(render)
# Converts attribute name to uppercase with underscores between words.
def to_uppercase_with_underscores(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).upper()
# TODO: Rewrite this function using regular expressions.
def get_managed_array_element_type(str):
child_type = "ManagedObject"
counts = False
for char in str:
if char == ">":
counts = False
if counts:
child_type += char
if char == "<":
child_type = ""
counts = True
return child_type;
# TODO: Rewrite this function using regular expressions.
def get_modi_class(str):
modi_class = ""
counts = True
for char in str:
if char == "<":
counts = False
if counts:
modi_class += char
return modi_class;
# Returns True if className is one of the Modi classes, false otherwise.
def is_modi_class(className):
modi_classes = ["ManagedArray", "ManagedValue", "ManagedPoint", "ManagedObjectId"]
for modi_class in modi_classes:
if modi_class == className:
return True
return False
def is_reserved_word(word):
reserved_words = ["super", "imports"]
for reserved_word in reserved_words:
if reserved_word == word:
return True
return False
|
|
import torch
import torch.nn as nn
import math
from torch.autograd import Function
class PairwiseDistance(Function):
def __init__(self, p):
super(PairwiseDistance, self).__init__()
self.norm = p
def forward(self, x1, x2):
assert x1.size() == x2.size()
eps = 1e-4 / x1.size(1)
diff = torch.abs(x1 - x2)
out = torch.pow(diff, self.norm).sum(dim=1)
return torch.pow(out + eps, 1. / self.norm)
class TripletMarginLoss(Function):
"""Triplet loss function.
"""
def __init__(self, margin):
super(TripletMarginLoss, self).__init__()
self.margin = margin
self.pdist = PairwiseDistance(2) # norm 2
def forward(self, anchor, positive, negative):
d_p = self.pdist.forward(anchor, positive)
d_n = self.pdist.forward(anchor, negative)
dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0.0)
loss = torch.mean(dist_hinge)
return loss
class ReLU(nn.Hardtanh):
def __init__(self, inplace=False):
super(ReLU, self).__init__(0, 20, inplace)
def __repr__(self):
inplace_str = 'inplace' if self.inplace else ''
return self.__class__.__name__ + ' (' \
+ inplace_str + ')'
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class myResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(myResNet, self).__init__()
self.relu = ReLU(inplace=True)
self.inplanes = 64
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, stride=2, padding=2,bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, layers[0])
self.inplanes = 128
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2,bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.layer2 = self._make_layer(block, 128, layers[1])
self.inplanes = 256
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2,bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.layer3 = self._make_layer(block, 256, layers[2])
self.inplanes = 512
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2,bias=False)
self.bn4 = nn.BatchNorm2d(512)
self.layer4 = self._make_layer(block, 512, layers[3])
self.avgpool = nn.AdaptiveAvgPool2d((1,None))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
layers = []
layers.append(block(self.inplanes, planes, stride))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class DeepSpeakerModel(nn.Module):
def __init__(self,embedding_size,num_classes,feature_dim = 64):
super(DeepSpeakerModel, self).__init__()
self.embedding_size = embedding_size
self.model = myResNet(BasicBlock, [1, 1, 1, 1])
if feature_dim == 64:
self.model.fc = nn.Linear(512*4, self.embedding_size)
elif feature_dim == 40:
self.model.fc = nn.Linear(256 * 5, self.embedding_size)
self.model.classifier = nn.Linear(self.embedding_size, num_classes)
def l2_norm(self,input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.layer1(x)
x = self.model.conv2(x)
x = self.model.bn2(x)
x = self.model.relu(x)
x = self.model.layer2(x)
x = self.model.conv3(x)
x = self.model.bn3(x)
x = self.model.relu(x)
x = self.model.layer3(x)
x = self.model.conv4(x)
x = self.model.bn4(x)
x = self.model.relu(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = x.view(x.size(0), -1)
x = self.model.fc(x)
self.features = self.l2_norm(x)
# Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
alpha=10
self.features = self.features*alpha
#x = x.resize(int(x.size(0) / 17),17 , 512)
#self.features =torch.mean(x,dim=1)
#x = self.model.classifier(self.features)
return self.features
def forward_classifier(self, x):
features = self.forward(x)
res = self.model.classifier(features)
return res
|
|
# -- Content-Encoding: utf-8 --
"""
Utility methods implementations for Win32
**TODO:**
* Complete review/refactoring
* Tests
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
import cohorte
import cohorte.utils as utils
import logging
import os
import sys
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Windows specific modules
# pylint: disable=F0401
import pywintypes
import win32api
import win32event
import win32process
try:
# Python 3
import winreg
except ImportError:
# Python 2
import _winreg as winreg
# ------------------------------------------------------------------------------
# From http://msdn.microsoft.com/en-us/library/ms681382%28v=VS.85%29.aspx
ERROR_INVALID_PARAMETER = 0x57
# From http://msdn.microsoft.com/en-us/library/ms684880%28v=VS.85%29.aspx
SYNCHRONIZE = 0x00100000
PROCESS_TERMINATE = 1
PROCESS_QUERY_INFORMATION = 0x0400
# From http://msdn.microsoft.com/en-us/library/ms683189%28v=VS.85%29.aspx
STILL_ACTIVE = 259
# From windows.h
INFINITE = 0xFFFFFFFF
# From win32.h
WAIT_TIMEOUT = 0x00000102
WAIT_FAILED = 0xFFFFFFFF
# ------------------------------------------------------------------------------
def get_registry_java_home():
"""
Retrieves the value of the JavaHome registry key
"""
jre_keys = (r"SOFTWARE\JavaSoft\Java Runtime Environment",
r"SOFTWARE\Wow6432Node\JavaSoft\Java Runtime Environment")
for jre_key_name in jre_keys:
try:
jre_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, jre_key_name)
# Compute current version key name
value = winreg.QueryValueEx(jre_key, "CurrentVersion")
if not value:
_logger.warning("No 'current' JVM in registry.")
return None
# Close the key
winreg.CloseKey(jre_key)
# Get its JavaHome
current_jre_key_name = jre_key_name + "\\" + value[0]
jre_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
current_jre_key_name)
value = winreg.QueryValueEx(jre_key, "JavaHome")
if not value:
_logger.warning("No current JavaHome in registry.")
return None
# Value found
return utils.remove_quotes(value[0])
except WindowsError as ex:
_logger.warning("Java path lookup error in the registry: %s (%s)",
ex, jre_key_name)
_logger.error("Java Runtime not found in registry")
return None
class OSUtils(utils.BaseOSUtils):
"""
Utility class implementation for Win32
"""
def find_java_interpreter(self, java_home):
"""
Finds the Java interpreter, in the given Java Home if possible
:param java_home: The preferred Java home
"""
# Case 1 : Try "preferred" JVM (embedded one)
java_home = utils.remove_quotes(java_home)
java = self._test_java_path(java_home)
if java is not None:
return java
# Case 2 : Use registry
java_home = get_registry_java_home()
java = self._test_java_path(java_home)
if java is not None:
return java
# Case 3 : Try with JAVA_HOME environment variable
java_home = utils.remove_quotes(os.getenv(cohorte.ENV_JAVA_HOME))
java = self._test_java_path(java_home)
if java is not None:
return java
# Case 4 : Try with all with PATH
return utils.find_in_path("java.exe")
def find_python2_interpreter(self):
"""
Finds a Python 2 interpreter
:return: The path to the first Python 2 interpreter found, or None
"""
# Try with embedded interpreter first
if self.home is not None:
path = os.path.join(self.home, "bin", "python2", "bin",
"python.exe")
if os.path.exists(path):
return os.path.abspath(path)
# Try with current interpreter
if sys.version_info[0] == 2 and sys.executable is not None:
return sys.executable
# TODO: try with the registry
# Try in the path
return utils.find_in_path("python3.exe")
def find_python3_interpreter(self):
"""
Finds a Python 3 interpreter
:return: The path to the first Python 3 interpreter found, or None
"""
# Try with embedded interpreter first
if self.home is not None:
path = os.path.join(self.home, "bin", "python3", "bin",
"python3.exe")
if os.path.exists(path):
return os.path.abspath(path)
# Try with current interpreter
if sys.version_info[0] == 3 and sys.executable is not None:
return sys.executable
# TODO: try with the registry
# Try in the path
return utils.find_in_path("python.exe")
def is_process_running(self, pid):
"""
Tests if the given process is running
:param pid: PID of the process to test
:return: True if the process is running else False
"""
if pid < 0:
# Invalid PID
return False
try:
# Windows loves handles
handle = win32api.OpenProcess(PROCESS_QUERY_INFORMATION,
False, pid)
except pywintypes.error as ex:
# PID not in the system anymore
if ex.winerror == ERROR_INVALID_PARAMETER:
return False
# Other kind of exception
raise ex
if not handle:
# OpenProcess failed
return False
# Look at the process state
exit_code = win32process.GetExitCodeProcess(handle)
# Clean the place before leaving
win32api.CloseHandle(handle)
# Return real state
return exit_code == STILL_ACTIVE
def kill_pid(self, pid):
"""
Kills the given PID, if possible
:param pid: PID of the process to kill
:raise ValueError: Invalid PID
:raise OSError: Unauthorized operation
"""
if pid is None or not self.is_process_running(pid):
raise ValueError("Invalid PID: %d" % pid)
handle = None
try:
handle = win32api.OpenProcess(PROCESS_TERMINATE, False, pid)
win32api.TerminateProcess(handle, -1)
except pywintypes.error as ex:
# PID not in the system anymore
if ex.winerror == ERROR_INVALID_PARAMETER:
raise ValueError("Invalid PID: %d" % pid)
# Other kind of exception
raise ex
finally:
if handle is not None:
win32api.CloseHandle(handle)
def wait_pid(self, pid, timeout=None):
"""
Waits for process with the given PID to terminate and return its
exit status code as an integer.
If PID is not a children of os.getpid() (current process) just
waits until the process disappears and return None.
If pid does not exist at all return None immediately.
Raise TimeoutExpired on timeout expired.
Code converted from C from the psutil Python library:
Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
:param pid: The PID to wait for
:param timeout: The maximum time to wait, in seconds.
None to wait forever
:raise TimeoutExpired: when timeout expired.
"""
if pid == 0:
return None
try:
# Windows loves handles
handle = win32api.OpenProcess(
SYNCHRONIZE | PROCESS_QUERY_INFORMATION, False, pid)
except pywintypes.error as ex:
# PID not in the system anymore
if ex.winerror == ERROR_INVALID_PARAMETER:
return None
# Other kind of exception
raise ex
if not handle:
# OpenProcess failed
return None
if timeout is None:
# There is no "None" on Windows
timeout = INFINITE
else:
# Convert to an integer in milliseconds
timeout = int(timeout * 1000)
try:
ret_val = win32event.WaitForSingleObject(handle, timeout)
if ret_val == WAIT_TIMEOUT:
# Time out raised
raise utils.TimeoutExpired(pid)
return win32process.GetExitCodeProcess(handle)
finally:
# Always clean up
win32api.CloseHandle(handle)
def _test_java_path(self, java_home):
"""
Tries to return the path to a Java interpreter
:param java_home: The Java home to test
:return: The Java interpreter path or None
"""
if not java_home:
return None
java = os.path.join(java_home, "bin", "java.exe")
if utils.is_file(java):
return java
return None
|
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutils`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specifed days of the month
* :class:`WeekdayLocator`: Locate days of the week, eg MO, TU
* :class:`MonthLocator`: locate months, eg 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<http://labix.org/python-dateutil>`_) which allow almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here all all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import print_function
import re
import time
import math
import datetime
from itertools import izip
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60. * HOURS_PER_DAY
SECONDS_PER_DAY = 60. * MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6 * SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour / HOURS_PER_DAY + dt.minute / MINUTES_PER_DAY +
dt.second / SECONDS_PER_DAY +
dt.microsecond / MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6 - microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
return np.asarray([_to_ordinalf(val) for val in d])
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j):
j = np.asarray(j)
return j - 1721424.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n):
n = np.asarray(n)
return n + 1721424.5
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds / SECONDS_PER_DAY +
delta.microseconds / MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure, that an half open interval will be generated [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greated than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, eg with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = self._findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return cbook.unicode_safe(s)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%D',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
formatter = AutoDateFormatter()
formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autofmt the date labels. The default format is the one to use
if none of the times in scaled match
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {365.0: '%Y',
30.: '%b %Y',
1.0: '%b %d %Y',
1. / 24.: '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f'}
def __call__(self, x, pos=0):
scale = float(self._locator._get_unit())
fmt = self.defaultfmt
for k in sorted(self.scaled):
if k >= scale:
fmt = self.scaled[k]
break
self._formatter = DateFormatter(fmt, self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((dmax, dmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, dmin, dmax,
self.MAXTICKS * 2))
dates = self.rule.between(dmin, dmax, True)
if len(dates) == 0:
return date2num([dmin, dmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if (freq == YEARLY):
return 365.0
elif (freq == MONTHLY):
return 30.0
elif (freq == WEEKLY):
return 7.0
elif (freq == DAILY):
return 1.0
elif (freq == HOURLY):
return (1.0 / 24.0)
elif (freq == MINUTELY):
return (1.0 / (24 * 60))
elif (freq == SECONDLY):
return (1.0 / (24 * 3600))
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(izip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32), range(0, 24),
range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - 365 * 2
vmax = vmax + 365 * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numMicroseconds = (numSeconds * 1e6) + delta.microseconds
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from an list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(izip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurances of each month month, eg 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurance.
"""
if bymonth is None:
bymonth = range(1, 13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurances of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutils.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurances of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurances of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurances of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurances of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurances of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self, *args, **kwargs):
vmin, vmax = self.axis.get_view_interval()
vmin *= MUSECONDS_PER_DAY
vmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(vmin, vmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2 - d1
mus = abs(delta.days * MUSECONDS_PER_DAY + delta.seconds * 1e6 +
delta.microseconds)
assert(mus < epsilon)
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert(delta < epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24. * 3600.
return 719163 + np.asarray(e) / spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24. * 3600.
return (np.asarray(d) - 719163) * spd
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / 24.
minutes = span * 24 * 60
hours = span * 24
days = span
weeks = span / 7.
months = span / 31. # approx
years = span / 365.
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hours > numticks:
locator = HourLocator(interval=int(math.ceil(hours / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes > numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
'Return seconds as days.'
return float(s) / SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m) / MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h / 24.
def weeks(w):
'Return weeks as days.'
return w * 7.
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
'Return the tzinfo instance of *x* or of its first element, or None'
try:
x = x[0]
except (TypeError, IndexError):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
|
import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from django.views.generic.base import View
from django.views.generic.detail import (
BaseDetailView, SingleObjectTemplateResponseMixin,
)
from django.views.generic.list import (
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
class YearMixin:
"""Mixin for views manipulating year-based data."""
year_format = '%Y'
year = None
def get_year_format(self):
"""
Get a year format string in strptime syntax to be used to parse the
year from url variables.
"""
return self.year_format
def get_year(self):
"""Return the year for which this view should display data."""
year = self.year
if year is None:
try:
year = self.kwargs['year']
except KeyError:
try:
year = self.request.GET['year']
except KeyError:
raise Http404(_("No year specified"))
return year
def get_next_year(self, date):
"""Get the next valid year."""
return _get_next_prev(self, date, is_previous=False, period='year')
def get_previous_year(self, date):
"""Get the previous valid year."""
return _get_next_prev(self, date, is_previous=True, period='year')
def _get_next_year(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
try:
return date.replace(year=date.year + 1, month=1, day=1)
except ValueError:
raise Http404(_("Date out of range"))
def _get_current_year(self, date):
"""Return the start date of the current interval."""
return date.replace(month=1, day=1)
class MonthMixin:
"""Mixin for views manipulating month-based data."""
month_format = '%b'
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"""Return the month for which this view should display data."""
month = self.month
if month is None:
try:
month = self.kwargs['month']
except KeyError:
try:
month = self.request.GET['month']
except KeyError:
raise Http404(_("No month specified"))
return month
def get_next_month(self, date):
"""Get the next valid month."""
return _get_next_prev(self, date, is_previous=False, period='month')
def get_previous_month(self, date):
"""Get the previous valid month."""
return _get_next_prev(self, date, is_previous=True, period='month')
def _get_next_month(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
if date.month == 12:
try:
return date.replace(year=date.year + 1, month=1, day=1)
except ValueError:
raise Http404(_("Date out of range"))
else:
return date.replace(month=date.month + 1, day=1)
def _get_current_month(self, date):
"""Return the start date of the previous interval."""
return date.replace(day=1)
class DayMixin:
"""Mixin for views manipulating day-based data."""
day_format = '%d'
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"""Return the day for which this view should display data."""
day = self.day
if day is None:
try:
day = self.kwargs['day']
except KeyError:
try:
day = self.request.GET['day']
except KeyError:
raise Http404(_("No day specified"))
return day
def get_next_day(self, date):
"""Get the next valid day."""
return _get_next_prev(self, date, is_previous=False, period='day')
def get_previous_day(self, date):
"""Get the previous valid day."""
return _get_next_prev(self, date, is_previous=True, period='day')
def _get_next_day(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=1)
def _get_current_day(self, date):
"""Return the start date of the current interval."""
return date
class WeekMixin:
"""Mixin for views manipulating week-based data."""
week_format = '%U'
week = None
def get_week_format(self):
"""
Get a week format string in strptime syntax to be used to parse the
week from url variables.
"""
return self.week_format
def get_week(self):
"""Return the week for which this view should display data."""
week = self.week
if week is None:
try:
week = self.kwargs['week']
except KeyError:
try:
week = self.request.GET['week']
except KeyError:
raise Http404(_("No week specified"))
return week
def get_next_week(self, date):
"""Get the next valid week."""
return _get_next_prev(self, date, is_previous=False, period='week')
def get_previous_week(self, date):
"""Get the previous valid week."""
return _get_next_prev(self, date, is_previous=True, period='week')
def _get_next_week(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
try:
return date + datetime.timedelta(days=7 - self._get_weekday(date))
except OverflowError:
raise Http404(_("Date out of range"))
def _get_current_week(self, date):
"""Return the start date of the current interval."""
return date - datetime.timedelta(self._get_weekday(date))
def _get_weekday(self, date):
"""
Return the weekday for a given date.
The first day according to the week format is 0 and the last day is 6.
"""
week_format = self.get_week_format()
if week_format == '%W': # week starts on Monday
return date.weekday()
elif week_format == '%U': # week starts on Sunday
return (date.weekday() + 1) % 7
else:
raise ValueError("unknown week format: %s" % week_format)
class DateMixin:
"""Mixin class for views manipulating date-based data."""
date_field = None
allow_future = False
def get_date_field(self):
"""Get the name of the date field to be used to filter by."""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Return `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
# Note: the following three methods only work in subclasses that also
# inherit SingleObjectMixin or MultipleObjectMixin.
@cached_property
def uses_datetime_field(self):
"""
Return `True` if the date field is a `DateTimeField` and `False`
if it's a `DateField`.
"""
model = self.get_queryset().model if self.model is None else self.model
field = model._meta.get_field(self.get_date_field())
return isinstance(field, models.DateTimeField)
def _make_date_lookup_arg(self, value):
"""
Convert a date into a datetime when the date field is a DateTimeField.
When time zone support is enabled, `date` is assumed to be in the
current time zone, so that displayed items are consistent with the URL.
"""
if self.uses_datetime_field:
value = datetime.datetime.combine(value, datetime.time.min)
if settings.USE_TZ:
value = timezone.make_aware(value, timezone.get_current_timezone())
return value
def _make_single_date_lookup(self, date):
"""
Get the lookup kwargs for filtering on a single date.
If the date field is a DateTimeField, we can't just filter on
date_field=date because that doesn't take the time into account.
"""
date_field = self.get_date_field()
if self.uses_datetime_field:
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
return {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
else:
# Skip self._make_date_lookup_arg, it's a no-op in this branch.
return {date_field: date}
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
"""Abstract base class for date-based views displaying a list of objects."""
allow_empty = False
date_list_period = 'year'
def get(self, request, *args, **kwargs):
self.date_list, self.object_list, extra_context = self.get_dated_items()
context = self.get_context_data(
object_list=self.object_list,
date_list=self.date_list,
**extra_context
)
return self.render_to_response(context)
def get_dated_items(self):
"""Obtain the list of dates and items."""
raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
def get_ordering(self):
"""
Return the field or fields to use for ordering the queryset; use the
date field by default.
"""
return '-%s' % self.get_date_field() if self.ordering is None else self.ordering
def get_dated_queryset(self, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = self.get_allow_future()
allow_empty = self.get_allow_empty()
paginate_by = self.get_paginate_by(qs)
if not allow_future:
now = timezone.now() if self.uses_datetime_field else timezone_today()
qs = qs.filter(**{'%s__lte' % date_field: now})
if not allow_empty:
# When pagination is enabled, it's better to do a cheap query
# than to load the unpaginated queryset in memory.
is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
if is_empty:
raise Http404(_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
})
return qs
def get_date_list_period(self):
"""
Get the aggregation period for the list of dates: 'year', 'month', or
'day'.
"""
return self.date_list_period
def get_date_list(self, queryset, date_type=None, ordering='ASC'):
"""
Get a date list by calling `queryset.dates/datetimes()`, checking
along the way for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
if date_type is None:
date_type = self.get_date_list_period()
if self.uses_datetime_field:
date_list = queryset.datetimes(date_field, date_type, ordering)
else:
date_list = queryset.dates(date_field, date_type, ordering)
if date_list is not None and not date_list and not allow_empty:
raise Http404(
_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': queryset.model._meta.verbose_name_plural,
}
)
return date_list
class BaseArchiveIndexView(BaseDateListView):
"""
Base class for archives of date-based items. Requires a response mixin.
"""
context_object_name = 'latest'
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
qs = self.get_dated_queryset()
date_list = self.get_date_list(qs, ordering='DESC')
if not date_list:
qs = qs.none()
return (date_list, qs, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
"""Top-level archive of date-based items."""
template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
"""List of objects published in a given year."""
date_list_period = 'month'
make_object_list = False
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_year(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
if not self.get_make_object_list():
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
qs = qs.none()
return (date_list, qs, {
'year': date,
'next_year': self.get_next_year(date),
'previous_year': self.get_previous_year(date),
})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""List of objects published in a given year."""
template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
"""List of objects published in a given month."""
date_list_period = 'day'
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_month(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
"""List of objects published in a given month."""
template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
"""List of objects published in a given week."""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
week = self.get_week()
date_field = self.get_date_field()
week_format = self.get_week_format()
week_start = {
'%W': '1',
'%U': '0',
}[week_format]
date = _date_from_string(year, self.get_year_format(),
week_start, '%w',
week, week_format)
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_week(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'week': date,
'next_week': self.get_next_week(date),
'previous_week': self.get_previous_week(date),
})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""List of objects published in a given week."""
template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""List of objects published on a given day."""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayArchiveView can be trivial.
"""
lookup_kwargs = self._make_single_date_lookup(date)
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date),
'previous_month': self.get_previous_month(date),
'next_month': self.get_next_month(date)
})
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""List of objects published on a given day."""
template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
"""List of objects published today."""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""List of objects published today."""
template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def get_object(self, queryset=None):
"""Get the object this request displays."""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
# Use a custom queryset if provided
qs = self.get_queryset() if queryset is None else queryset
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(_(
"Future %(verbose_name_plural)s not available because "
"%(class_name)s.allow_future is False."
) % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
'class_name': self.__class__.__name__,
})
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super().get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
template_name_suffix = '_detail'
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Get a datetime.date object given a format string and a year, month, and day
(only year is mandatory). Raise a 404 for an invalid date.
"""
format = year_format + delim + month_format + delim + day_format
datestr = str(year) + delim + str(month) + delim + str(day)
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
def _get_next_prev(generic_view, date, is_previous, period):
"""
Get the next or the previous valid date. The idea is to allow links on
month/day views to never be 404s by never providing a date that'll be
invalid for the given view.
This is a bit complicated since it handles different intervals of time,
hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day/week/month,
regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive result
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
get_current = getattr(generic_view, '_get_current_%s' % period)
get_next = getattr(generic_view, '_get_next_%s' % period)
# Bounds of the current interval
start, end = get_current(date), get_next(date)
# If allow_empty is True, the naive result will be valid
if allow_empty:
if is_previous:
result = get_current(start - datetime.timedelta(days=1))
else:
result = end
if allow_future or result <= timezone_today():
return result
else:
return None
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
ordering = date_field
# Filter out objects in the future if appropriate.
if not allow_future:
# Fortunately, to match the implementation of allow_future,
# we need __lte, which doesn't conflict with __lt above.
if generic_view.uses_datetime_field:
now = timezone.now()
else:
now = timezone_today()
lookup['%s__lte' % date_field] = now
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
return None
# Convert datetimes to dates in the current time zone.
if generic_view.uses_datetime_field:
if settings.USE_TZ:
result = timezone.localtime(result)
result = result.date()
# Return the first day of the period.
return get_current(result)
def timezone_today():
"""Return the current date in the current time zone."""
if settings.USE_TZ:
return timezone.localdate()
else:
return datetime.date.today()
|
|
# Copyright (C) 2018 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import multiprocessing
import os
import select
import socket
import subprocess
import sys
import threading
from argparse import Namespace
from blinkpy.common import exit_codes
from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.path_finder import get_chromium_src_dir
from blinkpy.web_tests.port import base
from blinkpy.web_tests.port import driver
from blinkpy.web_tests.port import factory
from blinkpy.web_tests.port import linux
from blinkpy.web_tests.port import server_process
# Modules loaded dynamically in _import_fuchsia_runner().
# pylint: disable=invalid-name
fuchsia_target = None
qemu_target = None
symbolizer = None
# pylint: enable=invalid-name
# Imports Fuchsia runner modules. This is done dynamically only when FuchsiaPort
# is instantiated to avoid dependency on Fuchsia runner on other platforms.
def _import_fuchsia_runner():
sys.path.insert(0, os.path.join(get_chromium_src_dir(), 'build/fuchsia'))
# pylint: disable=import-error
# pylint: disable=invalid-name
# pylint: disable=redefined-outer-name
global aemu_target
import aemu_target
global _GetPathToBuiltinTarget, _LoadTargetClass
from common_args import _GetPathToBuiltinTarget, _LoadTargetClass
global device_target
import device_target
global fuchsia_target
import target as fuchsia_target
global qemu_target
import qemu_target
global symbolizer
import symbolizer
# pylint: enable=import-error
# pylint: enable=invalid-name
# pylint: disable=redefined-outer-name
# Path to the content shell package relative to the build directory.
CONTENT_SHELL_PACKAGE_PATH = 'gen/content/shell/content_shell/content_shell.far'
# HTTP path prefixes for the HTTP server.
# WEB_TEST_PATH_PREFIX should be matched to the local directory name of
# web_tests because some tests and test_runner find test root directory
# with it.
WEB_TESTS_PATH_PREFIX = '/third_party/blink/' + WEB_TESTS_LAST_COMPONENT
# Paths to the directory where the fonts are copied to. Must match the path in
# content/shell/app/blink_test_platform_support_fuchsia.cc .
FONTS_DEVICE_PATH = '/system/fonts'
PROCESS_START_TIMEOUT = 20
_log = logging.getLogger(__name__)
def _subprocess_log_thread(pipe, prefix):
try:
while True:
line = pipe.readline()
if not line:
return
_log.error('%s: %s', prefix, line)
finally:
pipe.close()
class SubprocessOutputLogger(object):
def __init__(self, process, prefix):
self._process = process
self._thread = threading.Thread(
target=_subprocess_log_thread, args=(process.stdout, prefix))
self._thread.daemon = True
self._thread.start()
def __del__(self):
self.close()
def close(self):
self._process.kill()
class _TargetHost(object):
def __init__(self, build_path, build_ids_path, ports_to_forward, target,
results_directory):
try:
self._pkg_repo = None
self._target = target
self._target.Start()
self._setup_target(build_path, build_ids_path, ports_to_forward,
results_directory)
except:
self.cleanup()
raise
def _setup_target(self, build_path, build_ids_path, ports_to_forward,
results_directory):
# Tell SSH to forward all server ports from the Fuchsia device to
# the host.
forwarding_flags = [
'-O',
'forward', # Send SSH mux control signal.
'-N', # Don't execute command
'-T' # Don't allocate terminal.
]
for port in ports_to_forward:
forwarding_flags += ['-R', '%d:localhost:%d' % (port, port)]
self._proxy = self._target.RunCommandPiped([],
ssh_args=forwarding_flags,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
package_path = os.path.join(build_path, CONTENT_SHELL_PACKAGE_PATH)
self._target.StartSystemLog([package_path])
self._pkg_repo = self._target.GetPkgRepo()
self._pkg_repo.__enter__()
self._target.InstallPackage([package_path])
# Process will be forked for each worker, which may make QemuTarget
# unusable (e.g. waitpid() for qemu process returns ECHILD after
# fork() ). Save command runner before fork()ing, to use it later to
# connect to the target.
self.target_command_runner = self._target.GetCommandRunner()
def run_command(self, command):
return self.target_command_runner.RunCommandPiped(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def cleanup(self):
try:
if self._pkg_repo:
self._pkg_repo.__exit__(None, None, None)
finally:
if self._target:
self._target.Stop()
class FuchsiaPort(base.Port):
port_name = 'fuchsia'
SUPPORTED_VERSIONS = ('fuchsia', )
FALLBACK_PATHS = {
'fuchsia':
['fuchsia'] + linux.LinuxPort.latest_platform_fallback_path()
}
def __init__(self, host, port_name, **kwargs):
super(FuchsiaPort, self).__init__(host, port_name, **kwargs)
self._operating_system = 'fuchsia'
self._version = 'fuchsia'
self._target_device = self.get_option('device')
self._architecture = 'x86_64' if self._target_cpu(
) == 'x64' else 'arm64'
self.server_process_constructor = FuchsiaServerProcess
# Used to implement methods that depend on the host platform.
self._host_port = factory.PortFactory(host).get(**kwargs)
self._target_host = self.get_option('fuchsia_target')
self._zircon_logger = None
self._host_ip = self.get_option('fuchsia_host_ip')
_import_fuchsia_runner()
def _driver_class(self):
return ChromiumFuchsiaDriver
def _path_to_driver(self, target=None):
return self._build_path_with_target(target, CONTENT_SHELL_PACKAGE_PATH)
def __del__(self):
if self._zircon_logger:
self._zircon_logger.close()
def _target_cpu(self):
return self.get_option('fuchsia_target_cpu')
def _cpu_cores(self):
# Revise the processor count on arm64, the trybots on arm64 are in
# dockers and cannot use all processors.
# For x64, fvdl always assumes hyperthreading is supported by intel
# processors, but the cpu_count returns the number regarding if the core
# is a physical one or a hyperthreading one, so the number should be
# divided by 2 to avoid creating more threads than the processor
# supports.
if self._target_cpu() == 'x64':
return max(int(multiprocessing.cpu_count() / 2) - 1, 4)
return 4
def setup_test_run(self):
super(FuchsiaPort, self).setup_test_run()
try:
target_args = Namespace(
out_dir=self._build_path(),
fuchsia_out_dir=self.get_option('fuchsia_out_dir'),
target_cpu=self._target_cpu(),
ssh_config=self.get_option('fuchsia_ssh_config'),
os_check='ignore',
host=self.get_option('fuchsia_host'),
port=self.get_option('fuchsia_port'),
node_name=self.get_option('fuchsia_node_name'),
cpu_cores=self._cpu_cores(),
require_kvm=True,
ram_size_mb=8192,
enable_graphics=False,
hardware_gpu=False,
with_network=False,
logs_dir=self.results_directory(),
custom_image=None)
target = _LoadTargetClass(
_GetPathToBuiltinTarget(
self._target_device)).CreateFromArgs(target_args)
self._target_host = _TargetHost(self._build_path(),
self.get_build_ids_path(),
self.SERVER_PORTS, target,
self.results_directory())
if self.get_option('zircon_logging'):
self._zircon_logger = SubprocessOutputLogger(
self._target_host.run_command(['dlog', '-f']), 'Zircon')
# Save fuchsia_target in _options, so it can be shared with other
# workers.
self._options.fuchsia_target = self._target_host
except fuchsia_target.FuchsiaTargetException as e:
_log.error('Failed to start qemu: %s.', str(e))
return exit_codes.NO_DEVICES_EXIT_STATUS
def clean_up_test_run(self):
if self._target_host:
self._target_host.cleanup()
self._target_host = None
def num_workers(self, requested_num_workers):
# Run a single qemu instance.
return min(self._cpu_cores(), requested_num_workers)
def _default_timeout_ms(self):
# Use 20s timeout instead of the default 6s. This is necessary because
# the tests are executed in qemu, so they run slower compared to other
# platforms.
return 20000
def requires_http_server(self):
"""HTTP server is always required to avoid copying the tests to the VM.
"""
return True
def start_http_server(self, additional_dirs, number_of_drivers):
additional_dirs['/third_party/blink/PerformanceTests'] = \
self._perf_tests_dir()
additional_dirs[WEB_TESTS_PATH_PREFIX] = self.web_tests_dir()
additional_dirs['/gen'] = self.generated_sources_directory()
additional_dirs['/third_party/blink'] = \
self._path_from_chromium_base('third_party', 'blink')
super(FuchsiaPort, self).start_http_server(additional_dirs,
number_of_drivers)
def path_to_apache(self):
return self._host_port.path_to_apache()
def path_to_apache_config_file(self):
return self._host_port.path_to_apache_config_file()
def default_smoke_test_only(self):
return True
def get_target_host(self):
return self._target_host
def get_build_ids_path(self):
package_path = self._path_to_driver()
return os.path.join(os.path.dirname(package_path), 'ids.txt')
class ChromiumFuchsiaDriver(driver.Driver):
def __init__(self, port, worker_number, no_timeout=False):
super(ChromiumFuchsiaDriver, self).__init__(port, worker_number,
no_timeout)
def _initialize_server_process(self, server_name, cmd_line, environment):
self._server_process = self._port.server_process_constructor(
self._port,
server_name,
cmd_line,
environment,
more_logging=self._port.get_option('driver_logging'),
host_ip=self._port._host_ip)
def _base_cmd_line(self):
cmd = [
'run',
'fuchsia-pkg://fuchsia.com/content_shell#meta/content_shell.cmx'
]
if self._port._target_device == 'qemu':
cmd.append('--ozone-platform=headless')
# Use Scenic on AEMU
else:
cmd.extend([
'--ozone-platform=scenic', '--enable-oop-rasterization',
'--use-vulkan', '--enable-gpu-rasterization',
'--force-device-scale-factor=1', '--use-gl=stub',
'--enable-features=UseSkiaRenderer,Vulkan',
'--gpu-watchdog-timeout-seconds=60'
])
return cmd
def _command_from_driver_input(self, driver_input):
command = super(ChromiumFuchsiaDriver,
self)._command_from_driver_input(driver_input)
if command.startswith('/'):
relative_test_filename = \
os.path.relpath(command, self._port.web_tests_dir())
command = 'http://127.0.0.1:8000' + WEB_TESTS_PATH_PREFIX + \
'/' + relative_test_filename
return command
# Custom version of ServerProcess that runs processes on a remote device.
class FuchsiaServerProcess(server_process.ServerProcess):
def __init__(self,
port_obj,
name,
cmd,
env=None,
treat_no_data_as_crash=False,
more_logging=False,
host_ip=None):
super(FuchsiaServerProcess, self).__init__(
port_obj, name, cmd, env, treat_no_data_as_crash, more_logging)
self._symbolizer_proc = None
self._host_ip = host_ip or qemu_target.HOST_IP_ADDRESS
def _start(self):
if self._proc:
raise ValueError('%s already running' % self._name)
self._reset()
# Fuchsia doesn't support stdin stream for packaged applications, so the
# stdin stream for content_shell is routed through a separate TCP
# socket. Open a local socket and then pass the address with the port as
# --stdin-redirect parameter. content_shell will connect to this address
# and will use that connection as its stdin stream.
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.bind(('127.0.0.1', 0))
listen_socket.listen(1)
stdin_port = listen_socket.getsockname()[1]
command = ['%s=%s' % (k, v) for k, v in self._env.items()] + \
self._cmd + \
['--no-sandbox', '--stdin-redirect=%s:%s' %
(self._host_ip, stdin_port)]
proc = self._port.get_target_host().run_command(command)
# Wait for incoming connection from content_shell.
fd = listen_socket.fileno()
read_fds, _, _ = select.select([fd], [], [], PROCESS_START_TIMEOUT)
if fd not in read_fds:
listen_socket.close()
proc.kill()
raise driver.DeviceFailure(
'Timed out waiting connection from content_shell.')
# Python's interfaces for sockets and pipes are different. To masquerade
# the socket as a pipe dup() the file descriptor and pass it to
# os.fdopen().
stdin_socket, _ = listen_socket.accept()
fd = stdin_socket.fileno() # pylint: disable=no-member
stdin_pipe = os.fdopen(os.dup(fd), "wb", 0)
stdin_socket.close()
proc.stdin.close()
proc.stdin = stdin_pipe
# Run symbolizer to filter the stderr stream.
self._symbolizer_proc = symbolizer.RunSymbolizer(
proc.stderr, subprocess.PIPE, [self._port.get_build_ids_path()])
proc.stderr = self._symbolizer_proc.stdout
self._set_proc(proc)
def stop(self, timeout_secs=0.0, kill_tree=False):
result = super(FuchsiaServerProcess, self).stop(
timeout_secs, kill_tree)
if self._symbolizer_proc:
self._symbolizer_proc.kill()
return result
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from base64 import b64encode
from collections import namedtuple
from errno import ENOENT
from functools import partial
from glob import glob
import hashlib
from itertools import chain
import json
from logging import getLogger
from os import listdir
from os.path import isdir, isfile, join
import conda_package_handling.api
from .link import islink, lexists
from .create import TemporaryDirectory
from ..._vendor.auxlib.collection import first
from ..._vendor.auxlib.compat import shlex_split_unicode
from ..._vendor.auxlib.ish import dals
from ...base.constants import PREFIX_PLACEHOLDER
from ...common.compat import open
from ...common.pkg_formats.python import (
PythonDistribution, PythonEggInfoDistribution, PythonEggLinkDistribution,
PythonInstalledDistribution,
)
from ...exceptions import CondaUpgradeError, CondaVerificationError, PathNotFoundError
from ...models.channel import Channel
from ...models.enums import FileMode, PackageType, PathType
from ...models.package_info import PackageInfo, PackageMetadata
from ...models.records import PathData, PathDataV1, PathsData, PrefixRecord
log = getLogger(__name__)
listdir = listdir
lexists, isdir, isfile = lexists, isdir, isfile
def yield_lines(path):
"""Generator function for lines in file. Empty generator if path does not exist.
Args:
path (str): path to file
Returns:
iterator: each line in file, not starting with '#'
"""
try:
with open(path) as fh:
for line in fh:
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
except (IOError, OSError) as e:
if e.errno == ENOENT:
pass
else:
raise
def _digest_path(algo, path):
if not isfile(path):
raise PathNotFoundError(path)
hasher = hashlib.new(algo)
with open(path, "rb") as fh:
for chunk in iter(partial(fh.read, 8192), b''):
hasher.update(chunk)
return hasher.hexdigest()
def compute_md5sum(file_full_path):
return _digest_path('md5', file_full_path)
def compute_sha256sum(file_full_path):
return _digest_path('sha256', file_full_path)
def find_first_existing(*globs):
for g in globs:
for path in glob(g):
if lexists(path):
return path
return None
# ####################################################
# functions supporting read_package_info()
# ####################################################
def read_package_info(record, package_cache_record):
epd = package_cache_record.extracted_package_dir
icondata = read_icondata(epd)
package_metadata = read_package_metadata(epd)
paths_data = read_paths_json(epd)
return PackageInfo(
extracted_package_dir=epd,
package_tarball_full_path=package_cache_record.package_tarball_full_path,
channel=Channel(record.schannel or record.channel),
repodata_record=record,
url=package_cache_record.url,
icondata=icondata,
package_metadata=package_metadata,
paths_data=paths_data,
)
def read_index_json(extracted_package_directory):
with open(join(extracted_package_directory, 'info', 'index.json')) as fi:
return json.load(fi)
def read_index_json_from_tarball(package_tarball_full_path):
with TemporaryDirectory() as tmpdir:
conda_package_handling.api.extract(package_tarball_full_path, tmpdir, 'info')
with open(join(tmpdir, 'info', 'index.json')) as f:
json_data = json.load(f)
return json_data
def read_repodata_json(extracted_package_directory):
with open(join(extracted_package_directory, 'info', 'repodata_record.json')) as fi:
return json.load(fi)
def read_icondata(extracted_package_directory):
icon_file_path = join(extracted_package_directory, 'info', 'icon.png')
if isfile(icon_file_path):
with open(icon_file_path, 'rb') as f:
data = f.read()
return b64encode(data).decode('utf-8')
else:
return None
def read_package_metadata(extracted_package_directory):
def _paths():
yield join(extracted_package_directory, 'info', 'link.json')
yield join(extracted_package_directory, 'info', 'package_metadata.json')
path = first(_paths(), key=isfile)
if not path:
return None
else:
with open(path, 'r') as f:
data = json.loads(f.read())
if data.get('package_metadata_version') != 1:
raise CondaUpgradeError(dals("""
The current version of conda is too old to install this package. (This version
only supports link.json schema version 1.) Please update conda to install
this package.
"""))
package_metadata = PackageMetadata(**data)
return package_metadata
def read_paths_json(extracted_package_directory):
info_dir = join(extracted_package_directory, 'info')
paths_json_path = join(info_dir, 'paths.json')
if isfile(paths_json_path):
with open(paths_json_path) as paths_json:
data = json.load(paths_json)
if data.get('paths_version') != 1:
raise CondaUpgradeError(dals("""
The current version of conda is too old to install this package. (This version
only supports paths.json schema version 1.) Please update conda to install
this package."""))
paths_data = PathsData(
paths_version=1,
paths=(PathDataV1(**f) for f in data['paths']),
)
else:
has_prefix_files = read_has_prefix(join(info_dir, 'has_prefix'))
no_link = read_no_link(info_dir)
def read_files_file():
files_path = join(info_dir, 'files')
for f in (ln for ln in (line.strip() for line in yield_lines(files_path)) if ln):
path_info = {"_path": f}
if f in has_prefix_files.keys():
path_info["prefix_placeholder"] = has_prefix_files[f][0]
path_info["file_mode"] = has_prefix_files[f][1]
if f in no_link:
path_info["no_link"] = True
if islink(join(extracted_package_directory, f)):
path_info["path_type"] = PathType.softlink
else:
path_info["path_type"] = PathType.hardlink
yield PathData(**path_info)
paths = tuple(read_files_file())
paths_data = PathsData(
paths_version=0,
paths=paths,
)
return paths_data
def read_has_prefix(path):
"""
reads `has_prefix` file and return dict mapping filepaths to tuples(placeholder, FileMode)
A line in `has_prefix` contains one of
* filepath
* placeholder mode filepath
mode values are one of
* text
* binary
"""
ParseResult = namedtuple('ParseResult', ('placeholder', 'filemode', 'filepath'))
def parse_line(line):
# placeholder, filemode, filepath
parts = tuple(x.strip('"\'') for x in shlex_split_unicode(line, posix=False))
if len(parts) == 1:
return ParseResult(PREFIX_PLACEHOLDER, FileMode.text, parts[0])
elif len(parts) == 3:
return ParseResult(parts[0], FileMode(parts[1]), parts[2])
else:
raise CondaVerificationError("Invalid has_prefix file at path: %s" % path)
parsed_lines = (parse_line(line) for line in yield_lines(path))
return {pr.filepath: (pr.placeholder, pr.filemode) for pr in parsed_lines}
def read_no_link(info_dir):
return set(chain(yield_lines(join(info_dir, 'no_link')),
yield_lines(join(info_dir, 'no_softlink'))))
def read_soft_links(extracted_package_directory, files):
return tuple(f for f in files if islink(join(extracted_package_directory, f)))
def read_python_record(prefix_path, anchor_file, python_version):
"""
Convert a python package defined by an anchor file (Metadata information)
into a conda prefix record object.
"""
pydist = PythonDistribution.init(prefix_path, anchor_file, python_version)
depends, constrains = pydist.get_conda_dependencies()
if isinstance(pydist, PythonInstalledDistribution):
channel = Channel("pypi")
build = "pypi_0"
package_type = PackageType.VIRTUAL_PYTHON_WHEEL
paths_tups = pydist.get_paths()
paths_data = PathsData(paths_version=1, paths=(
PathDataV1(
_path=path, path_type=PathType.hardlink, sha256=checksum, size_in_bytes=size
) for (path, checksum, size) in paths_tups
))
files = tuple(p[0] for p in paths_tups)
elif isinstance(pydist, PythonEggLinkDistribution):
channel = Channel("<develop>")
build = "dev_0"
package_type = PackageType.VIRTUAL_PYTHON_EGG_LINK
paths_data, files = PathsData(paths_version=1, paths=()), ()
elif isinstance(pydist, PythonEggInfoDistribution):
channel = Channel("pypi")
build = "pypi_0"
if pydist.is_manageable:
package_type = PackageType.VIRTUAL_PYTHON_EGG_MANAGEABLE
paths_tups = pydist.get_paths()
files = tuple(p[0] for p in paths_tups)
paths_data = PathsData(paths_version=1, paths=(
PathData(_path=path, path_type=PathType.hardlink) for path in files
))
else:
package_type = PackageType.VIRTUAL_PYTHON_EGG_UNMANAGEABLE
paths_data, files = PathsData(paths_version=1, paths=()), ()
else:
raise NotImplementedError()
return PrefixRecord(
package_type=package_type,
name=pydist.conda_name,
version=pydist.version,
channel=channel,
subdir="pypi",
fn=pydist.sp_reference,
build=build,
build_number=0,
paths_data=paths_data,
files=files,
depends=depends,
constrains=constrains,
)
|
|
# -*- coding: utf-8 -*-
"""
=============================================
I/O functions (:mod:`sknano.core._io`)
=============================================
.. currentmodule:: sknano.core._io
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
import json
import os
import re
import sys
try:
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
except ImportError:
yaml = None
__all__ = ['get_fname', 'get_fpath', 'listdir_dirnames', 'listdir_fnames',
'listdir', 'loadobj', 'dumpobj']
def get_fname(fname=None, ext=None, outpath=os.getcwd(), overwrite=False,
add_fnum=True, fnum=None, verbose=False, **kwargs):
"""Generate modified `fname` string based on chosen parameters.
Parameters
----------
fname : str
Name of file, with or without an extension.
ext : str, optional
File extension to append to `fname`. If `ext` is None,
then `fname` is analyzed to see if it likely already has an
extension. An extension is set to the
last element in the list of strings returned by
`fname.split('.')` **if** this list has more than 1 element.
Otherwise, `ext` will be set to an empty string `''`.
If `ext` is not None and is a valid string,
then `fname` is analyzed to see if it already ends with `ext`.
If `fname.endswith(ext)` is `True` from the start, then `ext` will
not be duplicately appended.
outpath : str, optional
Absolute or relative path for generated output file.
Default is the absolute path returned by `os.getcwd()`.
overwrite : bool, optional
If `True`, overwrite an existing file if it has the same generated
file path.
add_fnum : bool, optional
Append integer number to output file name, starting with **1**.
fnum : {None, int}, optional
Starting file number to append if `add_fnum` is `True`.
.. note::
If the generated file path exists and `overwrite` is False,
setting this parameter has no effect.
verbose : bool, optional
Show verbose output.
Returns
-------
fname : str
Updated `fname`.
"""
return get_fpath(fname=fname, ext=ext, outpath=outpath,
overwrite=overwrite, add_fnum=add_fnum, fnum=fnum,
fname_only=True, verbose=verbose)
def get_fpath(fname=None, ext=None, outpath=os.getcwd(), overwrite=False,
add_fnum=True, fnum=None, include_fname=False, fname_only=False,
verbose=False):
"""Generate absolute path to modified `fname`.
Parameters
----------
fname : str
Name of file, with or without an extension.
ext : str, optional
File extension to append to `fname`. If `ext` is None,
then `fname` is analyzed to see if it likely already has an
extension. An extension is set to the
last element in the list of strings returned by
`fname.split('.')` **if** this list has more than 1 element.
Otherwise, `ext` will be set to an empty string `''`.
If `ext` is not None and is a valid string,
then `fname` is analyzed to see if it already ends with `ext`.
If `fname.endswith(ext)` is `True` from the start, then `ext` will
not be duplicately appended.
outpath : str, optional
Absolute or relative path for generated output file.
Default is the absolute path returned by `os.getcwd()`.
overwrite : bool, optional
If `True`, overwrite an existing file if it has the same generated
file path.
add_fnum : bool, optional
Append integer number to output file name, starting with **1**.
fnum : {None, int}, optional
Starting file number to append if `add_fnum` is `True`.
.. note::
If the generated file path exists and `overwrite` is False,
setting this parameter has no effect.
include_fname : bool, optional
If `True`, return `(fpath, fname)` tuple.
fname_only : bool, optional
If `True`, return only `fname`.
verbose : bool, optional
Show verbose output.
Returns
-------
fpath : str
The concatenation of `outpath` followed by the updated `fname`.
(fpath, fname) : tuple (only if `include_fname` is `True`)
2-tuple of strings `(fpath, fname)`.
fname : str (only if `fname_only` is `True`)
Updated `fname`.
"""
f = None
if fname is None or fname == '':
error_msg = '`fname` must be a string at least 1 character long.'
if fname is None:
raise TypeError(error_msg)
else:
raise ValueError(error_msg)
else:
f = fname
fsplit = f.split('.')
if ext is None:
if len(fsplit) > 1:
ext = '.' + fsplit[-1]
else:
ext = ''
else:
# check if extension already starts with a '.'
if not ext.startswith('.'):
ext = '.' + ext
# check if file name already ends with extension.
if f.split('.')[-1] != ext.split('.')[-1]:
f += ext
if add_fnum:
fname = re.split(ext, f)[0]
if fnum is not None:
f = fname + '-{:d}'.format(fnum) + ext
else:
f = fname + '-1' + ext
fpath = None
if outpath is None:
outpath = os.getcwd()
try:
os.makedirs(outpath)
except OSError:
if os.path.isdir(outpath):
pass
else:
outpath = os.curdir
finally:
fname = f
fpath = os.path.join(outpath, fname)
if os.path.isfile(fpath):
if overwrite:
try:
os.remove(fpath)
except OSError as e:
print(e)
sys.exit(1)
else:
if verbose:
print('overwriting existing file: {}'.format(fname))
else:
if add_fnum:
while os.path.isfile(fpath):
fname = \
'-'.join(re.split('-', re.split(ext, f)[0])[:-1])
fnum = re.split('-', re.split(ext, f)[0])[-1]
f = fname + '-' + str(int(fnum) + 1) + ext
fpath = os.path.join(outpath, f)
fname = f
else:
print('file exists: {}\n'.format(fpath))
print('Set `add_fnum=True` to generate unique\n' +
'`fname` or `overwrite=True` to overwrite\n' +
'existing file.')
fpath = None
if verbose:
print('Generated file name: {}'.format(fname))
print('File path: {}'.format(fpath))
if fname_only:
return fname
elif include_fname:
return fpath, fname
else:
return fpath
def listdir_dirnames(path='.', filterfunc=None, include_path=False):
"""Return list of names of directories in the directory given by `path`.
Parameters
----------
path : :class:`~python:str`, optional
filterfunc : `function`, optional
include_path : :class:`~python:bool`, optional
Returns
-------
fnames : :class:`~python:list`
:class:`~python:list` of names of directories in `path`.
"""
return listdir(path, filterfunc=filterfunc,
filter_dirnames=filterfunc is not None,
include_path=include_path)[0]
def listdir_fnames(path='.', filterfunc=None, include_path=False):
"""Return list of names of files in the directory given by `path`.
Parameters
----------
path : :class:`~python:str`, optional
filterfunc : `function`, optional
include_path : :class:`~python:bool`, optional
Returns
-------
fnames : :class:`~python:list`
:class:`~python:list` of names of files in `path`.
"""
return listdir(path, filterfunc=filterfunc,
filter_fnames=filterfunc is not None,
include_path=include_path)[-1]
def listdir(path='.', filterfunc=None, filter_dirnames=False,
filter_fnames=False, include_path=False):
"""Return a tuple of the names of the directories and files in the
directory given by `path`.
Parameters
----------
path : :class:`~python:str`, optional
filterfunc : `function`, optional
filter_dirnames : :class:`~python:bool`, optional
filter_fnames : :class:`~python:bool`, optional
include_path : :class:`~python:bool`, optional
Returns
-------
(dirnames, fnames) : :class:`~python:tuple`
:class:`~python:tuple` of names of directories and files in `path`.
"""
l = os.listdir(path)
dirnames = \
[name for name in l if os.path.isdir(os.path.join(path, name))]
fnames = \
[name for name in l if os.path.isfile(os.path.join(path, name))]
if filter_dirnames and filterfunc is not None:
dirnames = list(filter(filterfunc, dirnames))
if filter_fnames and filterfunc is not None:
fnames = list(filter(filterfunc, fnames))
if include_path:
dirnames = [os.path.join(path, name) for name in dirnames]
fnames = [os.path.join(path, name) for name in fnames]
return dirnames, fnames
def loadobj(fn, *args, **kwargs):
with open(fn) as fp:
if fn.lower().endswith(("yaml", "yml")):
if "Loader" not in kwargs:
kwargs["Loader"] = Loader
return yaml.load(fp, *args, **kwargs)
else:
return json.load(fp, *args, **kwargs)
def dumpobj(obj, fn, *args, **kwargs):
with open(fn, 'wt') as fp:
if fn.lower().endswith(("yaml", "yml")):
if "Dumper" not in kwargs:
kwargs["Dumper"] = Dumper
yaml.dump(obj, fp, *args, **kwargs)
else:
json.dump(obj, fp, *args, **kwargs)
|
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable libraries for execution related APIs."""
import collections
import itertools
import re
from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple
from absl import logging
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import event_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from tfx.utils import proto_utils
from tfx.utils import typing_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
_EXECUTION_RESULT = '__execution_result__'
_PROPERTY_SCHEMA_PREFIX = '__schema__'
_PROPERTY_SCHEMA_SUFFIX = '__'
def is_execution_successful(execution: metadata_store_pb2.Execution) -> bool:
"""Whether or not an execution is successful.
Args:
execution: An execution message.
Returns:
A bool value indicating whether or not the execution is successful.
"""
return (execution.last_known_state == metadata_store_pb2.Execution.COMPLETE or
execution.last_known_state == metadata_store_pb2.Execution.CACHED)
def is_execution_active(execution: metadata_store_pb2.Execution) -> bool:
"""Returns `True` if an execution is active.
Args:
execution: An execution message.
Returns:
A bool value indicating whether or not the execution is active.
"""
return (execution.last_known_state == metadata_store_pb2.Execution.NEW or
execution.last_known_state == metadata_store_pb2.Execution.RUNNING)
def is_internal_key(key: str) -> bool:
"""Returns `True` if the key is an internal-only execution property key."""
return key.startswith('__')
def is_schema_key(key: str) -> bool:
"""Returns `True` if the input key corresponds to a schema stored in execution property."""
return re.fullmatch(r'^__schema__.*__$', key) is not None
def get_schema_key(key: str) -> str:
"""Returns key for storing execution property schema."""
return _PROPERTY_SCHEMA_PREFIX + key + _PROPERTY_SCHEMA_SUFFIX
def sort_executions_newest_to_oldest(
executions: Iterable[metadata_store_pb2.Execution]
) -> List[metadata_store_pb2.Execution]:
"""Returns MLMD executions in sorted order, newest to oldest.
Args:
executions: An iterable of MLMD executions.
Returns:
Executions sorted newest to oldest (based on MLMD execution creation time).
"""
return sorted(
executions, key=lambda e: e.create_time_since_epoch, reverse=True)
def prepare_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
state: metadata_store_pb2.Execution.State,
exec_properties: Optional[Mapping[str, types.ExecPropertyTypes]] = None,
execution_name: str = '',
) -> metadata_store_pb2.Execution:
"""Creates an execution proto based on the information provided.
Args:
metadata_handler: A handler to access MLMD store.
execution_type: A metadata_pb2.ExecutionType message describing the type of
the execution.
state: The state of the execution.
exec_properties: Execution properties that need to be attached.
execution_name: Name of the execution.
Returns:
A metadata_store_pb2.Execution message.
"""
execution = metadata_store_pb2.Execution()
execution.last_known_state = state
execution.type_id = common_utils.register_type_if_not_exist(
metadata_handler, execution_type).id
if execution_name:
execution.name = execution_name
exec_properties = exec_properties or {}
# For every execution property, put it in execution.properties if its key is
# in execution type schema. Otherwise, put it in execution.custom_properties.
for k, v in exec_properties.items():
value = pipeline_pb2.Value()
value = data_types_utils.set_parameter_value(value, v)
if value.HasField('schema'):
# Stores schema in custom_properties for non-primitive types to allow
# parsing in later stages.
data_types_utils.set_metadata_value(
execution.custom_properties[get_schema_key(k)],
proto_utils.proto_to_json(value.schema))
if (execution_type.properties.get(k) ==
data_types_utils.get_metadata_value_type(v)):
execution.properties[k].CopyFrom(value.field_value)
else:
execution.custom_properties[k].CopyFrom(value.field_value)
logging.debug('Prepared EXECUTION:\n %s', execution)
return execution
def _create_artifact_and_event_pairs(
metadata_handler: metadata.Metadata,
artifact_dict: typing_utils.ArtifactMultiMap,
event_type: metadata_store_pb2.Event.Type,
) -> List[Tuple[metadata_store_pb2.Artifact, metadata_store_pb2.Event]]:
"""Creates a list of [Artifact, Event] tuples.
The result of this function will be used in a MLMD put_execution() call.
Args:
metadata_handler: A handler to access MLMD store.
artifact_dict: The source of artifacts to work on. For each unique artifact
in the dict, creates a tuple for that. Note that all artifacts of the same
key in the artifact_dict are expected to share the same artifact type. If
the same artifact is used for multiple keys, several event paths will be
generated for the same event.
event_type: The event type of the event to be attached to the artifact
Returns:
A list of [Artifact, Event] tuples
"""
result = []
artifact_event_map = dict()
for key, artifact_list in artifact_dict.items():
artifact_type = None
for index, artifact in enumerate(artifact_list):
if (artifact.mlmd_artifact.HasField('id') and
artifact.id in artifact_event_map):
event_lib.add_event_path(
artifact_event_map[artifact.id][1], key=key, index=index)
else:
# TODO(b/153904840): If artifact id is present, skip putting the
# artifact into the pair when MLMD API is ready.
event = event_lib.generate_event(
event_type=event_type, key=key, index=index)
# Reuses already registered type in the same list whenever possible as
# the artifacts in the same list share the same artifact type.
if artifact_type:
assert artifact_type.name == artifact.artifact_type.name, (
'Artifacts under the same key should share the same artifact '
'type.')
artifact_type = common_utils.register_type_if_not_exist(
metadata_handler, artifact.artifact_type)
artifact.set_mlmd_artifact_type(artifact_type)
if artifact.mlmd_artifact.HasField('id'):
artifact_event_map[artifact.id] = (artifact.mlmd_artifact, event)
else:
result.append((artifact.mlmd_artifact, event))
result.extend(list(artifact_event_map.values()))
return result
def put_execution(
metadata_handler: metadata.Metadata,
execution: metadata_store_pb2.Execution,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None,
output_artifacts: Optional[typing_utils.ArtifactMultiMap] = None,
input_event_type: metadata_store_pb2.Event.Type = metadata_store_pb2.Event
.INPUT,
output_event_type: metadata_store_pb2.Event.Type = metadata_store_pb2.Event
.OUTPUT
) -> metadata_store_pb2.Execution:
"""Writes an execution-centric subgraph to MLMD.
This function mainly leverages metadata.put_execution() method to write the
execution centric subgraph to MLMD.
Args:
metadata_handler: A handler to access MLMD.
execution: The execution to be written to MLMD.
contexts: MLMD contexts to associated with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event with type input_event_type.
Each artifact will also be linked with every context in the `contexts`
argument.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type output_event_type.
Each artifact will also be linked with every context in the `contexts`
argument.
input_event_type: The type of the input event, default to be INPUT.
output_event_type: The type of the output event, default to be OUTPUT.
Returns:
An MLMD execution that is written to MLMD, with id pupulated.
"""
artifact_and_events = []
if input_artifacts:
artifact_and_events.extend(
_create_artifact_and_event_pairs(
metadata_handler=metadata_handler,
artifact_dict=input_artifacts,
event_type=input_event_type))
if output_artifacts:
artifact_and_events.extend(
_create_artifact_and_event_pairs(
metadata_handler=metadata_handler,
artifact_dict=output_artifacts,
event_type=output_event_type))
execution_id, artifact_ids, contexts_ids = (
metadata_handler.store.put_execution(
execution=execution,
artifact_and_events=artifact_and_events,
contexts=contexts,
reuse_context_if_already_exist=True))
execution.id = execution_id
for artifact_and_event, a_id in zip(artifact_and_events, artifact_ids):
artifact, _ = artifact_and_event
artifact.id = a_id
for context, c_id in zip(contexts, contexts_ids):
context.id = c_id
return execution
def get_executions_associated_with_all_contexts(
metadata_handler: metadata.Metadata,
contexts: Iterable[metadata_store_pb2.Context]
) -> List[metadata_store_pb2.Execution]:
"""Returns executions that are associated with all given contexts.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts for which to fetch associated executions.
Returns:
A list of executions associated with all given contexts.
"""
executions_dict = None
for context in contexts:
executions = metadata_handler.store.get_executions_by_context(context.id)
if executions_dict is None:
executions_dict = {e.id: e for e in executions}
else:
executions_dict = {e.id: e for e in executions if e.id in executions_dict}
return list(executions_dict.values()) if executions_dict else []
def get_artifact_ids_by_event_type_for_execution_id(
metadata_handler: metadata.Metadata,
execution_id: int) -> Dict['metadata_store_pb2.Event.Type', Set[int]]:
"""Returns artifact ids corresponding to the execution id grouped by event type.
Args:
metadata_handler: A handler to access MLMD.
execution_id: Id of the execution for which to get artifact ids.
Returns:
A `dict` mapping event type to `set` of artifact ids.
"""
events = metadata_handler.store.get_events_by_execution_ids([execution_id])
result = collections.defaultdict(set)
for event in events:
result[event.type].add(event.artifact_id)
return result
def get_artifacts_dict(
metadata_handler: metadata.Metadata, execution_id: int,
event_types: 'List[metadata_store_pb2.Event.Type]'
) -> typing_utils.ArtifactMultiDict:
"""Returns a map from key to an ordered list of artifacts for the given execution id.
The dict is constructed purely from information stored in MLMD for the
execution given by `execution_id`. The "key" is the tag associated with the
`InputSpec` or `OutputSpec` in the pipeline IR.
Args:
metadata_handler: A handler to access MLMD.
execution_id: Id of the execution for which to get artifacts.
event_types: Event types to filter by.
Returns:
A dict mapping key to an ordered list of artifacts.
Raises:
ValueError: If the events are badly formed and correct ordering of
artifacts cannot be determined or if all the artifacts could not be
fetched from MLMD.
"""
events = metadata_handler.store.get_events_by_execution_ids([execution_id])
# Create a map from "key" to list of (index, artifact_id)s.
indexed_artifact_ids_dict = collections.defaultdict(list)
for event in events:
if event.type not in event_types:
continue
key, index = event_lib.get_artifact_path(event)
artifact_id = event.artifact_id
indexed_artifact_ids_dict[key].append((index, artifact_id))
# Create a map from "key" to ordered list of artifact ids.
artifact_ids_dict = {}
for key, indexed_artifact_ids in indexed_artifact_ids_dict.items():
ordered_artifact_ids = sorted(indexed_artifact_ids, key=lambda x: x[0])
# There shouldn't be any missing or duplicate indices.
indices = [idx for idx, _ in ordered_artifact_ids]
if indices != list(range(0, len(indices))):
raise ValueError(
f'Cannot construct artifact ids dict due to missing or duplicate '
f'indices: {indexed_artifact_ids_dict}')
artifact_ids_dict[key] = [aid for _, aid in ordered_artifact_ids]
# Fetch all the relevant artifacts.
all_artifact_ids = list(itertools.chain(*artifact_ids_dict.values()))
mlmd_artifacts = metadata_handler.store.get_artifacts_by_id(all_artifact_ids)
if len(all_artifact_ids) != len(mlmd_artifacts):
raise ValueError('Could not find all mlmd artifacts for ids: {}'.format(
', '.join(all_artifact_ids)))
# Fetch artifact types and create a map keyed by artifact type id.
artifact_type_ids = set(a.type_id for a in mlmd_artifacts)
artifact_types = metadata_handler.store.get_artifact_types_by_id(
artifact_type_ids)
artifact_types_by_id = {a.id: a for a in artifact_types}
# Set `type` field in the artifact proto which is not filled by MLMD.
for artifact in mlmd_artifacts:
artifact.type = artifact_types_by_id[artifact.type_id].name
# Create a map from artifact id to `types.Artifact` instances.
artifacts_by_id = {
a.id: artifact_utils.deserialize_artifact(artifact_types_by_id[a.type_id],
a) for a in mlmd_artifacts
}
# Create a map from "key" to ordered list of `types.Artifact` to be returned.
# The ordering of artifacts is in accordance with their "index" derived from
# the events above.
result = collections.defaultdict(list)
for key, artifact_ids in artifact_ids_dict.items():
for artifact_id in artifact_ids:
result[key].append(artifacts_by_id[artifact_id])
return result
def set_execution_result(execution_result: execution_result_pb2.ExecutionResult,
execution: metadata_store_pb2.Execution):
"""Sets execution result as a custom property of execution.
Args:
execution_result: The result of execution. It is typically generated by
executor.
execution: The execution to set to.
"""
execution.custom_properties[_EXECUTION_RESULT].string_value = (
json_format.MessageToJson(execution_result))
|
|
from datetime import datetime
import numpy as np
from pandas import (Series, TimeSeries, DataFrame, DataMatrix, WidePanel,
LongPanel)
from pandas.core.pytools import adjoin
import pandas.lib.tseries as tseries
try:
import tables
except ImportError:
pass
class HDFStore(object):
"""
dict-like IO interface for storing pandas objects in PyTables
format
Parameters
----------
path : string
File path to HDF5 file
Examples
--------
>>> store = HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path):
self.handle = tables.openFile(path, 'a')
def close(self):
self.handle.close()
def flush(self):
self.handle.flush()
def __repr__(self):
output = str(self.__class__) + '\n'
keys = []
values = []
for k, v in sorted(self.handle.root._v_children.iteritems()):
kind = v._v_attrs.pandas_type
keys.append(str(k))
values.append(kind)
output += adjoin(5, keys, values)
return output
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
"""
return self[key]
def put(self, key, value):
"""
Store object in file
Parameters
----------
key : object
value : {Series, DataFrame, WidePanel, LongPanel}
pandas data structure
"""
self[key] = value
def __getitem__(self, key):
group = getattr(self.handle.root, key)
return _read_group(group)
def __setitem__(self, key, value):
self._write_group(key, value)
def _write_group(self, key, value):
root = self.handle.root
if key not in root._v_children:
group = self.handle.createGroup(root, key)
else:
group = getattr(root, key)
kind = type(value)
handler = self._get_write_handler(kind)
try:
handler(group, value)
except Exception:
raise
group._v_attrs.pandas_type = kind.__name__
return True
def _write_series(self, group, series):
self._write_index(group, 'index', series.index)
self._write_array(group, 'values', np.asarray(series))
def _write_frame(self, group, df):
self._write_index(group, 'index', df.index)
self._write_index(group, 'columns', df.columns)
self._write_array(group, 'values', df.asMatrix(df.columns))
def _write_matrix(self, group, dm):
self._write_index(group, 'index', dm.index)
self._write_index(group, 'columns', dm.columns)
self._write_array(group, 'values', dm.values)
if dm.objects is not None:
self._write_index(group, 'obj_columns', dm.objects.columns)
self._write_array(group, 'obj_values', dm.objects.values)
def _write_wide(self, group, value):
pass
def _write_long(self, group, value):
pass
def _write_index(self, group, key, value):
converted, kind = _convert_index(value)
self._write_array(group, key, converted)
node = getattr(group, key)
node._v_attrs.kind = kind
def _write_array(self, group, key, value):
if key in group:
self.handle.removeNode(group, key)
self.handle.createArray(group, key, value)
def _get_write_handler(self, kind):
handlers = {
Series : self._write_series,
TimeSeries : self._write_series,
DataFrame : self._write_frame,
DataMatrix : self._write_matrix,
WidePanel : self._write_wide,
LongPanel : self._write_long
}
return handlers[kind]
def _read_group(group):
kind = group._v_attrs.pandas_type
if kind in ('Series', 'TimeSeries'):
return _read_series(group)
elif kind == 'DataFrame':
return _read_frame(group)
elif kind == 'DataMatrix':
return _read_matrix(group)
elif kind == 'WidePanel':
return _read_wide(group)
elif kind == 'LongPanel':
return _read_long(group)
def _read_series(group):
index = _read_index(group, 'index')
values = group.values[:]
return Series(values, index=index)
def _read_frame(group):
index = _read_index(group, 'index')
columns = _read_index(group, 'columns')
values = group.values[:]
return DataFrame(values, index=index, columns=columns)
def _read_matrix(group):
index = _read_index(group, 'index')
columns = _read_index(group, 'columns')
values = group.values[:]
objects = None
if hasattr(group, 'obj_columns'):
obj_columns = _read_index(group, 'columns')
obj_values = group.obj_values[:]
objects = DataMatrix(obj_values, index=index, columns=obj_columns)
return DataMatrix(values, index=index, columns=columns,
objects=objects)
def _read_wide(group):
index = _read_index(group, 'index')
values = group.values[:]
return Series(values, index=index)
def _read_long(group):
index = _read_index(group, 'index')
values = group.values[:]
return Series(values, index=index)
def _read_index(group, key):
node = getattr(group, key)
data = node[:]
kind = node._v_attrs.kind
return _unconvert_index(data, kind)
def _convert_index(index):
# Let's assume the index is homogeneous
values = np.asarray(index)
if isinstance(values[0], datetime):
converted = tseries.array_to_timestamp(values)
return converted, 'datetime'
elif isinstance(values[0], basestring):
converted = np.array(list(values), dtype=np.str_)
return converted, 'string'
else:
return np.array(list(values)), 'other'
def _unconvert_index(data, kind):
if kind == 'datetime':
index = tseries.array_to_datetime(data)
elif kind == 'string':
index = np.array(data, dtype=object)
else:
index = data
return index
|
|
# encoding: utf-8
import re
import warnings
import numpy as np
import pandas as pd
import sys
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = default_stdout
sys.stderr = default_stderr
BASE_COL = '@'
BASE_ROW = ['is_counts', 'is_c_base']
PCT_TYPES = ['is_c_pct', 'is_r_pct']
NOT_PCT_TYPES = ['is_stat']
CONTINUATION_STR = "(continued {})"
MAX_PIE_ELMS = 4
def float2String(input, ndigits=0):
"""
Round and converts the input, if int/float or list of, to a string.
Parameters
----------
input: int/float or list of int/float
ndigits: int
number of decimals to round to
Returns
-------
output: string or list of strings
depending on the input
"""
output = input
if not isinstance(input, list):
output = [output]
output = map(lambda x: round(x, ndigits), output)
output = map(int, output)
output = map(str, output)
if not isinstance(input, list):
output = output[0]
return output
def uniquify(l):
"""
Return the given list without duplicates, retaining order.
See Dave Kirby's order preserving uniqueifying list function
http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
seen_add = seen.add
uniques = [x for x in l if x not in seen and not seen_add(x)]
return uniques
def strip_levels(df, rows=None, columns=None):
"""
Function that strips a MultiIndex DataFrame for specified row and column index
Parameters
----------
df: pandas.DataFrame
rows: int
Row index to remove, default None
columns: int
Column index to remove, default None
Returns
-------
df_strip: pandas.DataFrame
The input dataframe stripped for specified levels
"""
df_strip = df.copy()
if rows is not None:
if df_strip.index.nlevels > 1:
df_strip.index = df_strip.index.droplevel(rows)
if columns is not None:
if df_strip.columns.nlevels > 1:
df_strip.columns = df_strip.columns.droplevel(columns)
return df_strip
def as_numeric(df):
"""
Runs through all values in input DataFrame and replaces
',' to '.'
'%' to ''
'-' to '0'
'*' to '0'
Parameters
----------
df : pandas.DataFrame
Returns
-------
pandas.DataFrame
with values as float
"""
if not df.values.dtype in ['float64', 'int64']:
data = [[float(str(value).replace(',','.').replace('%','').replace('-','0').replace('*','0')) for value in values] for values in df.values]
df = pd.DataFrame(data, index=df.index, columns=df.columns)
return df.copy()
def is_grid_slice(chain):
"""
Returns True if chain is a grid slice
Parameters
----------
chain: quantipy.Chain
Returns
-------
bool
True if grid slice
"""
pattern = '\[\{.*?\}\].'
found = re.findall(pattern, chain.name)
if len(found) > 0 and chain._array_style == -1:
return True
def get_indexes_from_list(lst, find, exact=True):
"""
Helper function that search for element in a list and
returns a list of indexes for element match
E.g.
get_indexes_from_list([1,2,3,1,5,1], 1) returns [0,3,5]
get_indexes_from_list(['apple','banana','orange','lemon'], 'orange') -> returns [2]
get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], 'orange') -> returns []
get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], ['orange'], False) -> returns [3]
Parameters
----------
lst: list
The list to look in
find: any
the element to find, can be a list
exact: bool
If False then index are returned if find in lst-item otherwise
only if find = lst-item
Returns
-------
list of int
"""
if exact == True:
return [index for index, value in enumerate(lst) if value == find]
else:
if isinstance(find,list):
return [index for index, value in enumerate(lst) if set(find).intersection(set(value))]
else:
return [index for index, value in enumerate(lst) if find in value]
def auto_charttype(df, array_style, max_pie_elms=MAX_PIE_ELMS):
"""
Auto suggest chart type based on dataframe analysis
TODO Move this to Class PptxDataFrame()
Parameters
----------
df: pandas.DataFrame
Not multiindex
array_style: int
array_style as returned from Chain Class
max_pie_elms: int
Max number of elements in Pie chart
Returns
-------
str
One of charttypes ('bar_clustered', 'bar_stacked_100', 'pie')
"""
if array_style == -1: # Not array summary
chart_type = 'bar_clustered'
if len(df.index.get_level_values(-1)) <= max_pie_elms:
if len(df.columns.get_level_values(-1)) == 1:
chart_type = 'pie'
elif array_style == 0:
chart_type = 'bar_stacked_100'
# TODO _auto_charttype - return 'bar_stacked' if rows not sum to 100
else:
chart_type = 'bar_clustered'
return chart_type
def fill_gaps(l):
"""
Return l replacing empty strings with the value from the previous position.
Parameters
----------
l: list
Returns
-------
list
"""
lnew = []
for i in l:
if i == '':
lnew.append(lnew[-1])
else:
lnew.append(i)
return lnew
def fill_index_labels(df):
"""
Fills in blank labels in the second level of df's multi-level index.
Parameters
----------
df: pandas.DataFrame
Returns
-------
pandas.DataFrame
"""
_0, _1 = zip(*df.index.values.tolist())
_1new = fill_gaps(_1)
dfnew = df.copy()
dfnew.index = pd.MultiIndex.from_tuples(zip(_0, _1new), names=df.index.names)
return dfnew
def fill_column_values(df, icol=0):
"""
Fills empty values in the targeted column with the value above it.
Parameters
----------
df: pandas.DataFrame
icol: int
Returns
-------
pandas.DataFrame
"""
v = df.iloc[:,icol].fillna('').values.tolist()
vnew = fill_gaps(v)
dfnew = df.copy() # type: pd.DataFrame
dfnew.iloc[:,icol] = vnew
return dfnew
class PptxDataFrame(object):
"""
Class for handling the dataframe to be charted.
The class is instantiated from the class PptxChain and holds
the chains dataframe, flattened and ready for charting.
A series of get cell-types methods can be used to select specific cell-types.
Parameters:
----------
df: pandas.DataFrame
The actual dataframe ready to use with PptxPainter
array_style: int
Array style as given by quantipy.chain.array_style
cell_types: list
The dataframes cell types as given by quantipy.chain.contents
"""
def __init__(self, dataframe, cell_types, array_style):
self.array_style = array_style
self.cell_items = cell_types
self.df = dataframe # type: pd.DataFrame
self.__frames = []
def __call__(self):
return self.df
def to_table(self, decimals=2, pct_decimals=2, decimal_separator='.'):
"""
Returns self.df formatted to be added to a table in a slide.
Basically just rounds values and if cell type = % then multiply values with 100
# todo : should'nt be here, move to PptxPainter
Parameters
----------
decimals: int
Number of decimals for not percentage cell_types
pct_decimals: int
Number of decimals for percentage cell_types
decimal_separator: str
Returns
-------
self
"""
df = self.df
if df.empty:
return self
if self.array_style == -1:
df = df.T
df = df.fillna('')
# Percent type cells
pct_indexes = get_indexes_from_list(self.cell_items, PCT_TYPES, exact=False)
df.iloc[:, pct_indexes] *= 100
df.iloc[:, pct_indexes] = df.iloc[:, pct_indexes].round(decimals=pct_decimals)
# Not percent type cells
not_pct_indexes = get_indexes_from_list(self.cell_items, NOT_PCT_TYPES, exact=False)
df.iloc[:, not_pct_indexes] = df.iloc[:, not_pct_indexes].round(decimals=decimals)
df = df.astype('str')
if pct_decimals == 0 or decimals == 0:
pct_columns = df.columns[pct_indexes].tolist() if pct_decimals == 0 else []
not_pct_columns = df.columns[not_pct_indexes].tolist() if decimals == 0 else []
columns = pct_columns + not_pct_columns
df[columns] = df[columns].replace('\.0', '', regex=True)
if not decimal_separator == '.':
df = df.replace('\.', ',', regex=True)
if self.array_style == -1:
df = df.T
self.df = df
return self
def _select_categories(self, categories):
"""
Returns a copy of self.df having only the categories requested
Parameters
----------
categories : list
A list of ints specifying the categories from self.df to return
Returns
-------
pptx_df_copy : PptxDataFrame
"""
if self.array_style == -1:
df_copy=self.df.iloc[categories]
else:
df_copy = self.df.iloc[:,categories]
pptx_df_copy = PptxDataFrame(df_copy,self.cell_items,self.array_style)
pptx_df_copy.cell_items = [self.cell_items[i] for i in categories]
return pptx_df_copy
def get_means(self):
"""
Return a copy of the PptxDataFrame containing only mean type categories
Returns
-------
PptxDataFrame
"""
return self.get('means')
def get_nets(self):
"""
Return a copy of the PptxDataFrame only containing net type categories
Returns
-------
PptxDataFrame
"""
return self.get('net')
def get_cpct(self):
"""
Return a copy of the PptxDataFrame only containing column percentage categories
Returns
-------
PptxDataFrame
"""
return self.get('c_pct')
def get_propstest(self):
"""
Return a copy of the PptxDataFrame only containing sig testing type categories
Returns
-------
PptxDataFrame
"""
return self.get('tests')
def get_stats(self):
"""
Return a copy of the PptxDataFrame only containing stat type categories
Returns
-------
PptxDataFrame
"""
return self.get('stats')
def _get_propstest_index(self):
"""
Return a list of index numbers from self.cell_items of type 'is_propstest'
Returns
-------
row_list : list
"""
row_list = get_indexes_from_list(self.cell_items, 'is_propstest', exact=False)
return row_list
def _get_stats_index(self):
"""
Return a list of index numbers from self.cell_items of type 'is_stat'
Returns
-------
row_list : list
"""
row_list = get_indexes_from_list(self.cell_items, 'is_stat', exact=False)
return row_list
def _get_cpct_index(self):
"""
Return a list of index numbers from self.cell_items of type 'is_c_pct' and not types
'is_net', 'net', 'is_c_pct_sum'
Returns
-------
row_list : list
"""
row_list = get_indexes_from_list(self.cell_items, 'is_c_pct', exact=False)
dont_want = get_indexes_from_list(self.cell_items, ['is_net', 'net', 'is_c_pct_sum'], exact=False)
not_net = get_indexes_from_list(self.cell_items, ['normal', 'expanded'], exact=False)
for x in dont_want:
if x in row_list and x not in not_net:
row_list.remove(x)
return row_list
def _get_nets_index(self):
"""
Return a list of index numbers from self.cell_items of types 'is_net' or 'net' and not types
'is_propstest', 'calc', 'normal', 'is_c_pct_sum', 'is_counts', 'expanded'
Returns
-------
row_list : list
"""
row_list = get_indexes_from_list(self.cell_items, ['is_net', 'net'], exact=False)
dont_want = get_indexes_from_list(self.cell_items,
['is_propstest', 'calc', 'normal', 'is_c_pct_sum', 'is_counts', 'expanded'],
exact=False)
for x in dont_want:
if x in row_list:
row_list.remove(x)
return row_list
def _get_means_index(self):
"""
Return a list of index numbers from self.cell_items of type 'is_mean' and not type
'is_meanstest'
Returns
-------
row_list : list
"""
row_list = get_indexes_from_list(self.cell_items, ['is_mean'], exact=False)
dont_want = get_indexes_from_list(self.cell_items, ['is_meanstest'], exact=False)
for x in dont_want:
if x in row_list:
row_list.remove(x)
return row_list
def get(self, cell_types, original_order=True):
"""
Method to get specific cell types from chains dataframe.
Will return a copy of the PptxDataFrame instance containing only
the requested cell types.
Available types are 'c_pct, net, mean, test, stat'
Parameters
----------
cell_types : str
A string of comma separated cell types to return.
original_order: Bool
Only relevant if more than one cell type is requested.
If True, cell types are returned in the same order as input dataframe.
If False, cell types will be returned in the order they are requested.
Returns
-------
PptxDataFrame
"""
method_map = {'c_pct': self._get_cpct_index,
'pct': self._get_cpct_index,
'net': self._get_nets_index,
'nets': self._get_nets_index,
'mean': self._get_means_index,
'means': self._get_means_index,
'test': self._get_propstest_index,
'tests': self._get_propstest_index,
'stats': self._get_stats_index,
'stat': self._get_stats_index}
# TODO Add methods for 'stddev', 'min', 'max', 'median', 't_means'
available_cell_types = set(method_map.keys())
if isinstance(cell_types, basestring):
cell_types = re.sub(' +', '', cell_types)
cell_types = cell_types.split(',')
value_test = set(cell_types).difference(available_cell_types)
if value_test:
raise ValueError("Cell type: {} is not an available cell type. \n Available cell types are {}".format(cell_types, available_cell_types))
cell_types_list = []
for cell_type in cell_types:
cell_types_list.extend(method_map[cell_type]())
if original_order: cell_types_list.sort()
new_pptx_df = self._select_categories(cell_types_list)
return new_pptx_df
class PptxChain(object):
"""
This class is a wrapper around Chain class to prepare for PPTX charting.
Parameters
----------
chain: quantipy.sandbox.sandbox.Chain
is_varname_in_qtext: Bool
Is question name is included in question text?
False: No question name included in question text
True: Question name included in question text, mask items has short question name included.
'Full': Question name included in question text, mask items has full question name included.
crossbreak: str
Select a crossbreak to include in charts. Default is None
base_type: str
Select the base type to show in base descriptions: 'weighted' or 'unweighted'
decimals: int
Select the number of decimals to include from Chain.dataframe
verbose: Bool
"""
def __init__(self, chain, is_varname_in_qtext=True, crossbreak=None, base_type='weighted', decimals=2, verbose=True):
self._chart_type = None
self._sig_test = None # type: list # is updated by ._select_crossbreak()
self.crossbreak_qtext = None # type: str # is updated by ._select_crossbreak()
self.verbose = verbose
self._decimals = decimals
self._chain = chain
self.name = chain.name
self.xkey_levels = chain.dataframe.index.nlevels
self.ykey_levels = chain.dataframe.columns.nlevels
self.index_map = self._index_map()
self.is_mask_item = chain._is_mask_item
self.x_key_name = chain._x_keys[0]
self.source = chain.source
self._var_name_in_qtext = is_varname_in_qtext
self.array_style = chain.array_style
self.is_grid_summary = True if chain.array_style in [0,1] else False
self.crossbreak = self._check_crossbreaks(crossbreak) if crossbreak else [BASE_COL]
self.x_key_short_name = self._get_short_question_name()
self.chain_df = self._select_crossbreak() # type: pd.DataFrame
self.xbase_indexes = self._base_indexes()
self.xbase_labels = ["Base"] if self.xbase_indexes is None else [x[0] for x in self.xbase_indexes]
self.xbase_count = ""
self.xbase_label = ""
self.xbase_index = 0
self.ybases = None
self.select_base(base_type=base_type)
self.base_description = "" if chain.base_descriptions is None else chain.base_descriptions
if self.base_description[0:6].lower() == "base: ": self.base_description = self.base_description[6:]
self._base_text = None
self.question_text = self.get_question_text(include_varname=False)
self.chart_df = self.prepare_dataframe()
self.continuation_str = CONTINUATION_STR
self.vals_in_labels = False
def __str__(self):
str_format = ('Table name: {}'
'\nX key name: {}'
'\nShort x key name: {}'
'\nGrid summary: {}'
'\nQuestion text: {}'
'\nBase description: {}'
'\nBase label: {}'
'\nBase size: {}'
'\nRequested crossbreak: {}'
'\n')
return str_format.format(getattr(self, 'name', 'None'),
getattr(self, 'x_key_name', 'None'),
getattr(self, 'x_key_short_name', 'None'),
getattr(self, 'is_grid_summary', 'None'),
getattr(self, 'question_text', 'None'),
getattr(self, 'base_description', 'None'),
getattr(self, 'xbase_labels', 'None'),
getattr(self, 'ybases', 'None'),
getattr(self, 'crossbreak', 'None'))
def __repr__(self):
return self.__str__()
@property
def sig_test(self):
# Get the sig testing
sig_df = self.prepare_dataframe()
sig_df = sig_df.get_propstest()
_sig_test = sig_df.df.values.tolist()
# Assume that all items in the list of sig tests has same length
check_list = map(lambda x: len(x), _sig_test)
assert check_list.count(check_list[0]) == len(check_list), 'List of sig test results is not uniform'
self._sig_test = [zip(*_sig_test)[i] for i in range(len(_sig_test[0]))]
return self._sig_test
@property
def chart_type(self):
if self._chart_type is None:
self._chart_type = auto_charttype(self.chart_df.get('pct,net').df, self.array_style)
return self._chart_type
@chart_type.setter
def chart_type(self, chart_type):
self._chart_type = chart_type
def _base_indexes(self):
"""
Finds all categories of type 'is_counts' and 'is_c_base' and then returns
a list of tuples holding (label, index, cell_content, value) for each base.
Method only used when instantiating Class.
Poppulates self.xbase_indexes
Eg. [(u'Unweighted base', 0, ['is_counts', 'is_c_base'], 1003.0),
(u'Base', 1, ['weight_1', 'is_weighted', 'is_counts', 'is_c_base'], 1002.9999999398246)]
Returns
-------
list
"""
cell_contents = self._chain.describe()
if self.array_style == 0:
row = min([k for k, va in cell_contents.items()
if any(pct in v for v in va for pct in PCT_TYPES)])
cell_contents = cell_contents[row]
# Find base rows
bases = get_indexes_from_list(cell_contents, BASE_ROW, exact=False)
skip = get_indexes_from_list(cell_contents, ['is_c_base_gross'], exact=False)
base_indexes = [idx for idx in bases if not idx in skip] or bases
# Show error if no base elements found
if not base_indexes:
#msg = "No 'Base' element found, base size will be set to None"
#warnings.warn(msg)
return None
cell_contents = [cell_contents[x] for x in base_indexes]
if self.array_style == -1 or self.array_style == 1:
xlabels = self._chain.dataframe.index.get_level_values(-1)[base_indexes].tolist()
base_counts = self._chain.dataframe.iloc[base_indexes, 0]
else:
xlabels = self._chain.dataframe.columns.get_level_values(-1)[base_indexes].tolist()
base_counts = self._chain.dataframe.iloc[0, base_indexes]
return zip(xlabels, base_indexes, cell_contents, base_counts)
def select_base(self,base_type='weighted'):
"""
Uses self.xbase_indexes to set
self.xbase_label,
self.xbase_count,
self.xbase_index
self.ybases
Parameters
----------
base_type: str
String to define which base type to use: 'weighted' or 'unweighted'
Returns
-------
None, sets self
"""
if not self.xbase_indexes:
msg = "No 'Base' element found"
warnings.warn(msg)
return None
if base_type: base_type = base_type.lower()
if not base_type in ['unweighted','weighted']:
raise TypeError('base_type misspelled, choose weighted or unweighted')
cell_contents = [x[2] for x in self.xbase_indexes]
if base_type == 'weighted':
index = [x for x, items in enumerate(cell_contents) if 'is_weighted' in items]
else:
index = [x for x, items in enumerate(cell_contents) if not 'is_weighted' in items]
if not index: index=[0]
# print "self.xbase_indexes: ", self.xbase_indexes
total_base = self.xbase_indexes[index[0]][3]
total_base = np.around(total_base, decimals=self._decimals)
self.xbase_count = float2String(total_base)
self.xbase_label = self.xbase_labels[index[0]]
self.xbase_index = self.xbase_indexes[index[0]][1]
self.ybases = self._get_y_bases()
def _get_y_bases(self):
"""
Retrieves the y-keys base label and base size from the dataframe.
If no crossbreak is requested the output is a list with one tuple, eg. [(u'Total', '1003')].
If eg. 'gender' is selected as crossbreak the output is [(u'Female', '487'), (u'Male', '516')]
Only used in method select_base to poppulate self.ybases.
Returns
-------
list
List of tuples [(base label, base size)]
"""
base_index = self.xbase_index
if not self.is_grid_summary:
# Construct a list of tuples with (base label, base size, test letter)
base_values = self.chain_df.iloc[base_index, :].values.tolist()
base_values = np.around(base_values, decimals=self._decimals).tolist()
base_values = float2String(base_values)
base_labels = list(self.chain_df.columns.get_level_values('Values'))
if self._chain.sig_levels:
base_test = list(self.chain_df.columns.get_level_values('Test-IDs'))
bases = zip(base_labels, base_values, base_test)
else:
bases = zip(base_labels, base_values)
else: # Array summary
# Find base columns
# Construct a list of tuples with (base label, base size)
base_values = self.chain_df.T.iloc[base_index,:].values.tolist()
base_values = np.around(base_values, decimals=self._decimals).tolist()
base_values = float2String(base_values)
base_labels = list(self.chain_df.index.get_level_values(-1))
bases = zip(base_labels, base_values)
#print ybases
return bases
def _index_map(self):
"""
Map not painted self._chain.dataframe.index with painted index into
a list of tuples (notpainted, painted).
If grid summary, self._chain.dataframe.columns are map'ed instead.
Example:
[('All', u'Base'), (1, u'Yes'), ('', u''), (2, u'No'), ('', u''), (8, u'Dont know'),
('', u''), ('sum', u'Totalsum')]
Only used to poppulate self.index_map in __init__
Returns
-------
list
"""
if self._chain.painted: # UnPaint if painted
self._chain.toggle_labels()
if self._chain.array_style == -1:
unpainted_index = self._chain.dataframe.index.get_level_values(-1).tolist()
else:
unpainted_index = self._chain.dataframe.columns.get_level_values(-1).tolist()
if not self._chain.painted: # Paint if not painted
self._chain.toggle_labels()
if self._chain.array_style == -1:
painted_index = self._chain.dataframe.index.get_level_values(-1).tolist()
else:
painted_index = self._chain.dataframe.columns.get_level_values(-1).tolist()
return zip(unpainted_index, painted_index)
def _select_crossbreak(self):
"""
Takes self._chain.dataframe and returns a copy with only the columns
stated in self.crossbreak.
Only used to poppulate self.chain_df in __init__.
Returns
-------
pd.DataFrame
"""
cell_items = self._chain.cell_items.split('_')
if not self.is_grid_summary:
# Keep only requested columns
if self._chain.painted: # UnPaint if painted
self._chain.toggle_labels()
all_columns = self._chain.dataframe.columns.get_level_values(0).tolist() # retrieve a list of the not painted column values for outer level
if self._chain.axes[1].index(BASE_COL) == 0:
all_columns[0] = BASE_COL # Need '@' as the outer column label
column_selection = []
for cb in self.crossbreak:
column_selection = column_selection + (get_indexes_from_list(all_columns, cb))
if not self._chain.painted: # Paint if not painted
self._chain.toggle_labels()
all_columns = self._chain.dataframe.columns.get_level_values(0).tolist() # retrieve a list of painted column values for outer level
col_qtexts = [all_columns[x] for x in column_selection] # determine painted column values for requested crossbreak
self.crossbreak_qtext = uniquify(col_qtexts) # Save q text for crossbreak in class atribute
# Slice the dataframes columns based on requested crossbreaks
df = self._chain.dataframe.iloc[:, column_selection]
if len(cell_items) > 1:
df = fill_index_labels(df)
else:
if len(cell_items) > 1:
cell_contents = self._chain.describe()
rows = [k for k, va in cell_contents.items()
if any(pct in v for v in va for pct in PCT_TYPES)]
df_filled = fill_index_labels(fill_column_values(self._chain.dataframe))
df = df_filled.iloc[rows, :]
#for index in base_indexes:
# base_values = self.chain.dataframe.iloc[rows_bad, index].values
# base_column = self.chain.dataframe.columns[index]
# df.loc[:,[base_column]] = base_values
else:
df = self._chain.dataframe
df_rounded = np.around(df, decimals=self._decimals, out=None)
return df_rounded
@property
def ybase_values(self):
"""
Returns a list with y base values picked from self.ybases.
Returns
-------
list
"""
if not hasattr(self, "_ybase_values"):
self._ybase_values=[x[1] for x in self.ybases]
return self._ybase_values
@property
def ybase_value_labels(self):
"""
Returns a list with y base labels picked from self.ybases.
Returns
-------
list
"""
if not hasattr(self, "_ybase_value_labels"):
self._ybase_value_labels=[x[0] for x in self.ybases]
return self._ybase_value_labels
@property
def ybase_test_labels(self):
"""
Returns a list with y base test labels picked from self.ybases.
Eg. ['A', 'B']
Returns
-------
list
"""
if not hasattr(self, "_ybase_test_labels"):
if self.is_grid_summary:
self._ybase_test_labels = None
return None
self._ybase_test_labels=[x[2] for x in self.ybases]
return self._ybase_test_labels
def add_test_letter_to_column_labels(self, sep=" ", prefix=None, circumfix='()'):
"""
Adds test letter to dataframe column labels.
Parameters
----------
sep: str
A string to separate the column label from the test letter, default is a single space.
prefix: str
An optional prefix.
circumfix: str
A two char string used to enclose the test letter.
Default '()'
Returns
-------
None
changes self.chain_df
"""
# Checking input
if circumfix is None:
circumfix = list(('',) * 2)
else:
if not isinstance(circumfix, str) or len(circumfix) <> 2:
raise TypeError("Parameter circumfix needs a string with length 2")
circumfix = list(circumfix)
str_parameters = ['sep', 'prefix']
for i in str_parameters:
if not isinstance(eval(i), (str, type(None))):
raise TypeError("Parameter {} must be a string".format(i))
if self.is_grid_summary:
pass
else:
column_labels = self.chain_df.columns.get_level_values('Values')
# Edit labels
new_labels_list = {}
for x, y in zip(column_labels, self.ybase_test_labels):
new_labels_list.update({x: x + (sep or '') + circumfix[0] + (prefix or '') + y + circumfix[1]})
self.chain_df = self.chain_df.rename(columns=new_labels_list)
def place_vals_in_labels(self, base_position=0, orientation='side', values=None, sep=" ", prefix="n=", circumfix="()", setup='if_differs'):
"""
Takes values from input list and adds them to self.chain_df's categories,
Meaning rows if grid summary, otherwise columns.
Can be used to insert base values in side labels for a grid summary.
Parameters
----------
base_position: for future usage
orientation: for future usage
values: list
a list with same number of values as categories in self.chain_df
sep: str
A string to separate the categories from the insert, default is a single space.
prefix: str
A prefix to add to the insert. Default 'n='
circumfix: str
A two char string used to enclose the insert.
Default '()'
setup: str
A string telling when to insert value ('always', 'if_differs', 'never')
Returns
-------
None
Changes self.chain_df
"""
if setup=='never': return
# Checking input
if circumfix is None:
circumfix = list(('',) * 2)
else:
if not isinstance(circumfix, str) or len(circumfix) <> 2:
raise TypeError("Parameter circumfix needs a string with length 2")
circumfix = list(circumfix)
str_parameters = ['sep', 'prefix', 'orientation', 'setup']
for i in str_parameters:
if not isinstance(eval(i), (str, type(None))):
raise TypeError("Parameter {} must be a string".format(i))
valid_orientation = ['side', 'column']
if orientation not in valid_orientation:
raise ValueError("Parameter orientation must be either of {}".format(valid_orientation))
valid_setup = ['always', 'if_differs', 'never']
if setup not in valid_setup:
raise ValueError("Parameter setup must be either of {}".format(valid_setup))
if self.is_grid_summary:
if (len(uniquify(self.ybase_values))>1 and setup=='if_differs') or setup=='always':
# grab row labels
index_labels = self.chain_df.index.get_level_values(-1)
# Edit labels
new_labels_list = {}
for x, y in zip(index_labels, values):
new_labels_list.update({x: x + (sep or '') + circumfix[0]+ (prefix or '') + str(y) + circumfix[1]})
self.chain_df = self.chain_df.rename(index=new_labels_list)
self.vals_in_labels = True
else:
# grab row labels
index_labels = self.chain_df.columns.get_level_values('Values')
# Edit labels
new_labels_list = {}
for x, y in zip(index_labels, values):
new_labels_list.update({x: x + (sep or '') + circumfix[0] + (prefix or '') + str(y) + circumfix[1]})
# Saving column index for level 'Question' in case it accidentially gets renamed
index_level_values = self.chain_df.columns.get_level_values('Question')
self.chain_df = self.chain_df.rename(columns=new_labels_list)
# Returning column index for level 'Question' in case it got renamed
self.chain_df.columns.set_levels(index_level_values, level='Question', inplace=True)
self.vals_in_labels = True
@property
def base_text(self):
return self._base_text
@base_text.setter
def base_text(self, base_text):
self._base_text = base_text
def set_base_text(self, base_value_circumfix="()", base_label_suf=":", base_description_suf=" - ", base_value_label_sep=", ", base_label=None):
"""
Returns the full base text made up of base_label, base_description and ybases, with some delimiters.
Setup is "base_label + base_description + base_value"
Parameters
----------
base_value_circumfix: str
Two chars to surround the base value
base_label_suf: str
A string to add after the base label
base_description_suf: str
A string to add after the base_description
base_value_label_sep: str
A string to separate base_values if more than one
base_label: str
An optional string to use instead of self.xbase_label
Returns
-------
str
Sets self._base_text
"""
# Checking input
if base_value_circumfix is None:
base_value_circumfix = list(('',) * 2)
else:
if not isinstance(base_value_circumfix, str) or len(base_value_circumfix) <> 2:
raise TypeError("Parameter base_value_circumfix needs a string with length 2")
base_value_circumfix = list(base_value_circumfix)
str_parameters = ['base_label_suf', 'base_description_suf', 'base_value_label_sep', 'base_label']
for i in str_parameters:
if not isinstance(eval(i), (str, type(None))):
raise TypeError("Parameter {} must be a string".format(i))
# Base_label
if base_label is None:
base_label = self.xbase_label
if self.base_description:
base_label = u"{}{}".format(base_label,base_label_suf or '')
else:
base_label = u"{}".format(base_label)
# Base_values
if self.xbase_indexes:
base_values = self.ybase_values[:]
for index, base in enumerate(base_values):
base_values[index] = u"{}{}{}".format(base_value_circumfix[0], base, base_value_circumfix[1])
else:
base_values=[""]
# Base_description
base_description = ""
if self.base_description:
if len(self.ybases) > 1 and not self.vals_in_labels and self.array_style==-1:
base_description = u"{}{}".format(self.base_description, base_description_suf or '')
else:
base_description = u"{} ".format(self.base_description)
# ybase_value_labels
base_value_labels = self.ybase_value_labels[:]
# Include ybase_value_labels in base values if more than one base value
base_value_text = ""
if base_value_label_sep is None: base_value_label_sep = ''
if len(base_values) > 1:
if not self.vals_in_labels:
if self.xbase_indexes:
for index, label in enumerate(zip(base_value_labels, base_values)):
base_value_text=u"{}{}{} {}".format(base_value_text, base_value_label_sep, label[0], label[1])
base_value_text = base_value_text[len(base_value_label_sep):]
else:
for index, label in enumerate(base_value_labels):
base_value_text=u"{}{}{}".format(base_value_text, base_value_label_sep, label)
base_value_text = base_value_text[len(base_value_label_sep):]
else:
if not self.is_grid_summary:
base_value_text = u"({})".format(self.xbase_count)
# Final base text
if not self.is_grid_summary:
if len(self.ybases) == 1:
if base_description:
base_text = u"{} {}{}".format(base_label,base_description,base_values[0])
else:
base_text = u"{} {}".format(base_label, base_values[0])
else:
if base_description:
base_text = u"{} {}{}".format(base_label,base_description,base_value_text)
else:
base_text = u"{} {}".format(base_label,base_value_text)
else: # Grid summary
if len(uniquify(self.ybase_values)) == 1:
if base_description:
base_text = u"{} {}{}".format(base_label,base_description,base_values[0])
else:
base_text = u"{} {}".format(base_label, base_values[0])
else:
if base_description:
base_text = u"{} {}".format(base_label, base_description)
else:
base_text = ""
self._base_text = base_text
def _check_crossbreaks(self, crossbreaks):
"""
Checks the crossbreaks input for duplicates and that crossbreak exist in the chain.
Parameters
----------
crossbreaks: list
List of strings
Returns
-------
list
The crossbreaks list stripped for duplicates and not existing crossbreaks
"""
if not isinstance(crossbreaks, list):
crossbreaks = [crossbreaks]
if not self.is_grid_summary:
for cb in crossbreaks[:]:
if cb not in self._chain.axes[1]:
crossbreaks.remove(cb)
if self.verbose:
msg = 'Requested crossbreak: \'{}\' is not found for chain \'{}\' and will be ignored'.format(cb, chain.name)
warnings.warn(msg)
if crossbreaks == []: crossbreaks = None
else:
pass # just ignore checking if Grid Summary
#crossbreaks = None
return uniquify(crossbreaks) if crossbreaks is not None else [BASE_COL]
def _get_short_question_name(self):
"""
Retrieves 'short' question name.
Used in __init__ to poppulate self.x_key_short_name
Returns
-------
str
"""
if not self.is_grid_summary: # Not grid summary
if self.is_mask_item: # Is grid slice
pattern = '(?<=\[\{).*(?=\}\])'
result_list = re.findall(pattern, self.x_key_name)
if result_list:
return result_list[0] # TODO Hmm what if grid has more than one level
else:
return self.x_key_name
else: # Not grid slice
return self.x_key_name
else: # Is grid summary
find_period = self.x_key_name.find('.')
if find_period > -1:
return self.x_key_name[:find_period]
else:
return self.x_key_name
def get_question_text(self, include_varname=False):
"""
Retrieves the question text from the dataframe.
If include_varname=True then the question text will be prefixed the var name.
Parameters
----------
include_varname: Bool
Returns
-------
str
"""
# Get variable name
var_name = self.x_key_name
if self.is_mask_item:
if self._var_name_in_qtext == True:
var_name = self.x_key_short_name
# Get question text, stripped for variable name
question_text = self.chain_df.index[0][0]
if self._var_name_in_qtext:
question_text = question_text[len(var_name) + 2:]
# Include the full question text for mask items if missing
if self.is_mask_item:
question_text = self._mask_question_text(question_text)
# Add variable name to question text if requested
if include_varname:
question_text = u'{}. {}'.format(self.x_key_short_name, question_text)
# Remove consecutive line breaks and spaces
question_text = re.sub('\n+', '\n', question_text)
question_text = re.sub('\r+', '\r', question_text)
question_text = re.sub(' +', ' ', question_text)
return question_text.strip()
def _mask_question_text(self, question_text):
"""
If chain is a mask item (a grid slice), then the parent question text
is added to question text unless already included.
Final question text in the form "parent_question_text - mask_question_text"
Only used in self.get_question_text().
Parameters
----------
question_text: str
Returns
-------
str
"""
if self.source == "native":
if self.is_mask_item:
meta = self._chain._meta
cols = meta['columns']
masks = meta['masks']
parent = cols[self.x_key_name]['parent'].keys()[0].split('@')[-1]
m_text = masks[parent]['text']
text = m_text.get('x edit', m_text).get(meta['lib']['default text'])
if not text.strip() in question_text:
question_text = u'{} - {}'.format(text, question_text)
return question_text
def prepare_dataframe(self):
"""
Prepares self.chain_df for charting, that is removes all outer levels
and prepares the dataframe for PptxPainter.
Returns
-------
pd.DataFrame
An edited copy of self.chain_df
"""
# Strip outer level
df = strip_levels(self.chain_df, rows=0, columns=0)
df = strip_levels(df, columns=1)
# Strip HTML TODO Is 'Strip HTML' at all nessecary?
# Check that the dataframe is numeric
all_numeric = all(df.applymap(lambda x: isinstance(x, (int, float)))) == True
if not all_numeric:
df = as_numeric(df)
# For rows that are type '%' divide by 100
indexes = []
cell_contents = self._chain.describe()
if self.is_grid_summary:
colpct_row = min([k for k, va in cell_contents.items()
if any(pct in v for v in va for pct in PCT_TYPES)])
cell_contents = cell_contents[colpct_row]
for i, row in enumerate(cell_contents):
for type in row:
for pct_type in PCT_TYPES:
if type == pct_type:
indexes.append(i)
if not self.is_grid_summary:
df.iloc[indexes] /= 100
else:
df.iloc[:, indexes] /= 100
# Make a PptxDataFrame instance
chart_df = PptxDataFrame(df, cell_contents, self.array_style)
# Choose a basic Chart type that will fit dataframe TODO Move this to init of Class PptxDataFrame
chart_df.chart_type = auto_charttype(df, self.array_style)
return chart_df
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
import socket
import sys
from jasmine.config import Config
from jasmine.standalone import JasmineApp
from jasmine.ci import CIRunner
def begin():
cmd = Command(JasmineApp, CIRunner)
cmd.run(sys.argv[1:])
def mkdir_p(path):
import os
import errno
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class Command(object):
def __init__(self, app, ci_runner):
self.app = app
self.ci_runner = ci_runner
self.parser = argparse.ArgumentParser(description='Jasmine command line')
subcommands = self.parser.add_subparsers(help='commands')
server = subcommands.add_parser('server', help='Jasmine server',
description='run a server hosting your Jasmine specs')
server.add_argument('-p', '--port', type=int, default=8888,
help='The port of the Jasmine html runner')
server.add_argument('-o', '--host', type=str, default='127.0.0.1',
help='The host of the Jasmine html runner')
server.add_argument('-c', '--config', type=str,
help='Custom path to jasmine.yml')
server.set_defaults(func=self.server)
ci = subcommands.add_parser('ci', help='Jasmine CI', description='execute your specs in a browser')
ci.add_argument('-b', '--browser', type=str,
help='The selenium driver to utilize')
ci.add_argument('-l', '--logs', action='store_true',
help='Displays browser logs')
ci.add_argument('-s', '--seed', type=str,
help='Seed for random spec order')
ci.add_argument('-c', '--config', type=str,
help='Custom path to jasmine.yml')
ci.set_defaults(func=self.ci)
init = subcommands.add_parser('init', help='initialize Jasmine', description='')
init.set_defaults(func=self.init)
def run(self, argv):
args = self.parser.parse_args(argv)
if 'func' in args and callable(args.func):
args.func(args)
else:
self.parser.print_help()
self.parser.exit(1)
def server(self, args):
if self._check_for_config(args.config):
jasmine_config = self._load_config(args.config)
try:
jasmine_app = self.app(jasmine_config=jasmine_config)
jasmine_app.run(host=args.host, port=args.port, blocking=True)
except socket.error:
sys.stdout.write('Socket unavailable')
def ci(self, args):
if self._check_for_config(args.config):
jasmine_config = self._load_config(args.config)
jasmine_app = self.app(jasmine_config=jasmine_config)
self.ci_runner(jasmine_config=jasmine_config).run(
browser=args.browser,
show_logs=args.logs,
seed=args.seed,
app=jasmine_app,
)
def _config_paths(self, custom_config_path):
project_path = os.path.realpath(os.path.dirname(__name__))
jasmine_conf = "spec/javascripts/support/jasmine.yml"
if 'JASMINE_CONFIG_PATH' in os.environ:
jasmine_conf = os.environ['JASMINE_CONFIG_PATH']
if custom_config_path is not None:
jasmine_conf = custom_config_path
config_file = os.path.join(
project_path,
jasmine_conf
)
return config_file, project_path
def _check_for_config(self, custom_config_path):
config_file, _ = self._config_paths(custom_config_path)
config_exists = os.path.exists(config_file)
if not config_exists:
print("Could not find your config file at {0}".format(config_file))
return config_exists
def _load_config(self, custom_config_path):
config_file, project_path = self._config_paths(custom_config_path)
return Config(config_file, project_path=project_path)
def query(self, question):
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
prompt = " [Y/n] "
while True:
sys.stdout.write(question + prompt)
try:
choice = raw_input().lower()
except NameError:
choice = input().lower()
if choice == '':
return True
elif choice in valid:
return valid[choice]
def init(self, args):
from jasmine.console_formatter import ConsoleFormatter
spec_dir = os.path.join(os.getcwd(), 'spec/javascripts/')
print(ConsoleFormatter.JASMINE_HEADER)
print('Spec directory')
msg = "About to create {0}... is this okay?".format(spec_dir)
if self.query(msg):
print('making spec/javascripts')
mkdir_p(spec_dir)
yaml_dir = os.path.join(spec_dir, 'support')
yaml_file_path = os.path.join(yaml_dir, 'jasmine.yml')
print(("*" * 80) + '\n\nConfig yaml')
if os.path.exists(yaml_file_path):
print('found existing {0}, not overwriting'.format(yaml_file_path))
else:
msg = "About to create {0}... is this okay?".format(yaml_file_path)
if self.query(msg):
print('making {0}'.format(yaml_dir))
mkdir_p(yaml_dir)
print('making {0}'.format(yaml_file_path))
try:
with open(yaml_file_path, 'w') as f:
f.write(self.YAML_TEMPLATE)
f.flush()
except IOError:
pass
YAML_TEMPLATE = """
# src_files
#
# Return an array of filepaths relative to src_dir to include before jasmine specs.
# Default: []
#
# EXAMPLE:
#
# src_files:
# - lib/source1.js
# - lib/source2.js
# - dist/**/*.js
#
src_files:
# stylesheets
#
# Return an array of stylesheet filepaths relative to src_dir to include before jasmine specs.
# Default: []
#
# EXAMPLE:
#
# stylesheets:
# - css/style.css
# - stylesheets/*.css
#
stylesheets:
# helpers
#
# Return an array of filepaths relative to spec_dir to include before jasmine specs.
# Default: ["helpers/**/*.js"]
#
# EXAMPLE:
#
# helpers:
# - helpers/**/*.js
#
helpers:
- "helpers/**/*.js"
# spec_files
#
# Return an array of filepaths relative to spec_dir to include.
# Default: ["**/*[sS]pec.js"]
#
# EXAMPLE:
#
# spec_files:
# - **/*[sS]pec.js
#
spec_files:
- "**/*[Ss]pec.js"
# src_dir
#
# Source directory path. Your src_files must be returned relative to this path. Will use root if left blank.
# Default: project root
#
# EXAMPLE:
#
# src_dir: public
#
src_dir:
# spec_dir
#
# Spec directory path. Your spec_files must be returned relative to this path.
# Default: spec/javascripts
#
# EXAMPLE:
#
# spec_dir: spec/javascripts
#
spec_dir: spec/javascripts
# stop_spec_on_expectation_failure
#
# Stop executing each spec on the first expectation failure.
# Default: false
#
# EXAMPLE:
#
# stop_spec_on_expectation_failure: true
#
stop_spec_on_expectation_failure:
# stop_on_spec_failure
#
# Stop executing Jasmine after the first spec fails
# Default: false
#
# EXAMPLE:
#
# stop_on_spec_failure: true
#
stop_on_spec_failure:
# random
#
# Run specs in semi-random order.
# Default: true
#
# EXAMPLE:
#
# random: false
#
random:
"""
|
|
# pylint: disable-msg=W0612
from copy import deepcopy
from datetime import datetime, timedelta
from cStringIO import StringIO
import cPickle as pickle
import os
import unittest
from numpy import random
import numpy as np
import pandas.core.datetools as datetools
from pandas.core.index import NULL_INDEX
from pandas.core.api import DataFrame, Index, Series, notnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
randn)
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# DataFrame test cases
class TestDataFrame(unittest.TestCase):
klass = DataFrame
def setUp(self):
self.seriesd = common.getSeriesData()
self.tsd = common.getTimeSeriesData()
self.frame = self.klass(self.seriesd)
self.intframe = self.klass(dict((k, v.astype(int))
for k, v in self.seriesd.iteritems()))
self.tsframe = self.klass(self.tsd)
self.mixed_frame = self.frame.copy()
self.mixed_frame['foo'] = 'bar'
self.ts1 = common.makeTimeSeries()
self.ts2 = common.makeTimeSeries()[5:]
self.ts3 = common.makeTimeSeries()[-5:]
self.ts4 = common.makeTimeSeries()[1:-1]
self.ts_dict = {
'col1' : self.ts1,
'col2' : self.ts2,
'col3' : self.ts3,
'col4' : self.ts4,
}
self.empty = self.klass({})
self.unsortable = self.klass(
{'foo' : [1] * 1000,
datetime.today() : [1] * 1000,
'bar' : ['bar'] * 1000,
datetime.today() + timedelta(1) : ['bar'] * 1000},
index=np.arange(1000))
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
self.mixed_frame.index = idx
self.assert_(self.mixed_frame['foo'].index is idx)
def test_constructor(self):
df = self.klass()
self.assert_(len(df.index) == 0)
df = self.klass(data={})
self.assert_(len(df.index) == 0)
df = self.klass(self.frame)
assert_frame_equal(df, self.frame)
def test_constructor_mixed(self):
index, data = common.getMixedTypeDict()
indexed_frame = self.klass(data, index=index)
unindexed_frame = self.klass(data)
def test_constructor_dict(self):
frame = self.klass({'col1' : self.ts1,
'col2' : self.ts2})
common.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)
common.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)
frame = self.klass({'col1' : self.ts1,
'col2' : self.ts2},
columns=['col2', 'col3', 'col4'])
self.assertEqual(len(frame), len(self.ts2))
self.assert_('col1' not in frame)
self.assert_(np.isnan(frame['col3']).all())
# Corner cases
self.assertEqual(len(self.klass({})), 0)
self.assertRaises(Exception, lambda x: self.klass([self.ts1, self.ts2]))
# pass dict and array, nicht nicht
self.assertRaises(Exception, self.klass,
{'A' : {'a' : 'a', 'b' : 'b'},
'B' : ['a', 'b']})
# can I rely on the order?
self.assertRaises(Exception, self.klass,
{'A' : ['a', 'b'],
'B' : {'a' : 'a', 'b' : 'b'}})
self.assertRaises(Exception, self.klass,
{'A' : ['a', 'b'],
'B' : Series(['a', 'b'], index=['a', 'b'])})
# Length-one dict micro-optimization
frame = self.klass({'A' : {'1' : 1, '2' : 2}})
self.assert_(np.array_equal(frame.index, ['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = self.klass({}, index=idx)
self.assert_(frame.index is idx)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A' : {'1' : 1, '2' : 2},
'B' : {'1' : '1', '2' : '2', '3' : '3'},
}
frame = self.klass(test_data, dtype=float)
self.assertEqual(len(frame), 3)
self.assert_(frame['B'].dtype == np.float_)
self.assert_(frame['A'].dtype == np.float_)
frame = self.klass(test_data)
self.assertEqual(len(frame), 3)
self.assert_(frame['B'].dtype == np.object_)
self.assert_(frame['A'].dtype == np.float_)
# can't cast to float
test_data = {
'A' : dict(zip(range(20), common.makeDateIndex(20))),
'B' : dict(zip(range(15), randn(15)))
}
frame = self.klass(test_data, dtype=float)
self.assertEqual(len(frame), 20)
self.assert_(frame['A'].dtype == np.object_)
self.assert_(frame['B'].dtype == np.float_)
def test_constructor_ndarray(self):
mat = np.zeros((2, 3), dtype=float)
# 2-D input
frame = self.klass(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.cols()), 3)
# cast type
frame = self.klass(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=int)
self.assert_(frame.values.dtype == np.int_)
# 1-D input
frame = self.klass(np.zeros(3), columns=['A'], index=[1, 2, 3])
self.assertEqual(len(frame.index), 3)
self.assertEqual(len(frame.cols()), 1)
frame = self.klass(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
# higher dim raise exception
self.assertRaises(Exception, self.klass, np.zeros((3, 3, 3)),
columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
self.assertRaises(Exception, self.klass, mat,
columns=['A', 'B', 'C'], index=[1])
self.assertRaises(Exception, self.klass, mat,
columns=['A', 'B'], index=[1, 2])
# automatic labeling
frame = self.klass(mat)
self.assert_(np.array_equal(frame.index, range(2)))
self.assert_(np.array_equal(frame.cols(), range(3)))
frame = self.klass(mat, index=[1, 2])
self.assert_(np.array_equal(frame.cols(), range(3)))
frame = self.klass(mat, columns=['A', 'B', 'C'])
self.assert_(np.array_equal(frame.index, range(2)))
# 0-length axis
frame = self.klass(np.empty((0, 3)))
self.assert_(frame.index is NULL_INDEX)
frame = self.klass(np.empty((3, 0)))
self.assert_(len(frame.cols()) == 0)
def test_array_interface(self):
result = np.sqrt(self.frame)
self.assert_(type(result) is type(self.frame))
self.assert_(result.index is self.frame.index)
self.assert_(result.cols() == self.frame.cols())
assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_pickle(self):
unpickled = pickle.loads(pickle.dumps(self.mixed_frame))
assert_frame_equal(self.mixed_frame, unpickled)
def test_toDict(self):
test_data = {
'A' : {'1' : 1, '2' : 2},
'B' : {'1' : '1', '2' : '2', '3' : '3'},
}
recons_data = self.klass(test_data).toDict()
for k, v in test_data.iteritems():
for k2, v2 in v.iteritems():
self.assertEqual(v2, recons_data[k][k2])
def test_fromRecords(self):
# from numpy documentation
arr = np.zeros((2,),dtype=('i4,f4,a10'))
arr[:] = [(1,2.,'Hello'),(2,3.,"World")]
frame = self.klass.fromRecords(arr)
indexed_frame = self.klass.fromRecords(arr, indexField='f1')
self.assertRaises(Exception, self.klass.fromRecords, np.zeros((2, 3)))
# what to do?
records = indexed_frame.toRecords()
self.assertEqual(len(records.dtype.names), 3)
def test_get_agg_axis(self):
cols = self.frame._get_agg_axis(0)
self.assert_(list(cols) == list(self.frame.columns))
idx = self.frame._get_agg_axis(1)
self.assert_(idx is self.frame.index)
self.assertRaises(Exception, self.frame._get_agg_axis, 2)
def test_nonzero(self):
self.assertFalse(self.empty)
self.assert_(self.frame)
self.assert_(self.mixed_frame)
# corner case
df = self.klass({'A' : [1., 2., 3.],
'B' : ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
self.assert_(df)
def test_repr(self):
# empty
foo = repr(self.empty)
# empty with index
frame = self.klass(index=np.arange(1000))
foo = repr(frame)
# small one
foo = repr(self.frame)
# big one
biggie = self.klass(np.zeros((1000, 4)), columns=range(4),
index=range(1000))
foo = repr(biggie)
# mixed
foo = repr(self.mixed_frame)
# big mixed
biggie = self.klass({'A' : randn(1000),
'B' : common.makeStringIndex(1000)},
index=range(1000))
biggie['A'][:20] = np.NaN
biggie['B'][:20] = np.NaN
foo = repr(biggie)
# exhausting cases in DataMatrix.info
# columns but no index
no_index = self.klass(columns=[0, 1, 3])
foo = repr(no_index)
# no columns or index
buf = StringIO()
self.empty.info(buffer=buf)
# columns are not sortable
foo = repr(self.unsortable)
# do not fail!
self.frame.head(buffer=buf)
self.frame.tail(buffer=buf)
for i in range(5):
self.frame['foo%d' % i] = 1
self.frame.head(buffer=buf)
self.frame.tail(buffer=buf)
def test_toString(self):
# big mixed
biggie = self.klass({'A' : randn(1000),
'B' : common.makeStringIndex(1000)},
index=range(1000))
biggie['A'][:20] = np.NaN
biggie['B'][:20] = np.NaN
buf = StringIO()
biggie.toString(buffer=buf)
biggie.toString(buffer=buf, columns=['B', 'A'], colSpace=17)
biggie.toString(buffer=buf, columns=['B', 'A'],
formatters={'A' : lambda x: '%.1f' % x})
biggie.toString(buffer=buf, columns=['B', 'A'],
float_format=str)
frame = self.klass(index=np.arange(1000))
frame.toString(buffer=buf)
def test_getitem(self):
# slicing
sl = self.frame[:20]
self.assertEqual(20, len(sl.index))
# column access
for _, series in sl.iteritems():
self.assertEqual(20, len(series.index))
self.assert_(common.equalContents(series.index, sl.index))
for key, _ in self.frame._series.iteritems():
self.assert_(self.frame[key] is not None)
self.assert_('random' not in self.frame)
self.assertRaises(Exception, self.frame.__getitem__, 'random')
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
self.assert_(np.array_equal(subindex, subframe.index))
self.assertRaises(Exception, self.tsframe.__getitem__, indexer[:-1])
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
self.assert_('col5' in self.frame)
common.assert_dict_equal(series, self.frame['col5'],
compare_keys=False)
series = self.frame['A']
self.frame['col6'] = series
common.assert_dict_equal(series, self.frame['col6'],
compare_keys=False)
self.assertRaises(Exception, self.frame.__setitem__,
randn(len(self.frame) + 1))
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
self.assert_((self.frame['col9'] == arr).all())
# set value, do out of order for DataMatrix
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
smaller = self.frame[:2]
smaller['col10'] = ['1', '2']
self.assertEqual(smaller['col10'].dtype, np.object_)
self.assert_((smaller['col10'] == ['1', '2']).all())
def test_delitem(self):
del self.frame['A']
self.assert_('A' not in self.frame)
def test_pop(self):
A = self.frame.pop('A')
self.assert_('A' not in self.frame)
self.frame['foo'] = 'bar'
foo = self.frame.pop('foo')
self.assert_('foo' not in self.frame)
def test_iter(self):
self.assert_(common.equalContents(list(self.frame), self.frame.cols()))
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.cols()))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in idSum.iteritems():
for idx, val in series.iteritems():
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assert_(np.isnan(origVal))
for col, series in seriesSum.iteritems():
for idx, val in series.iteritems():
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assert_(np.isnan(origVal))
def test_neg(self):
# what to do?
assert_frame_equal(-self.frame, -1 * self.frame)
def test_firstLastValid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = np.NaN
mat[-5:] = np.NaN
frame = self.klass({'foo' : mat}, index=self.frame.index)
index = frame._firstTimeWithValue()
self.assert_(index == frame.index[5])
index = frame._lastTimeWithValue()
self.assert_(index == frame.index[-6])
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = np.NaN
added = self.frame + frame_copy
common.assert_dict_equal(added['A'].valid(),
self.frame['A'] * 2,
compare_keys=False)
self.assert_(np.isnan(added['C'][:5]).all())
self.assert_(np.isnan(added['D']).all())
self_added = self.frame + self.frame
self.assert_(self_added.index.equals(self.frame.index))
added_rev = frame_copy + self.frame
self.assert_(np.isnan(added['D']).all())
# corner cases
# empty
plus_empty = self.frame + self.empty
self.assert_(np.isnan(plus_empty.values).all())
empty_plus = self.empty + self.frame
self.assert_(np.isnan(empty_plus.values).all())
empty_empty = self.empty + self.empty
self.assert_(not empty_empty)
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
def test_combineSeries(self):
# Series
series = self.frame.getXS(self.frame.index[0])
added = self.frame + series
for key, s in added.iteritems():
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.toDict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in self.frame.iteritems():
assert_series_equal(larger_added[key], s + series[key])
self.assert_('E' in larger_added)
self.assert_(np.isnan(larger_added['E']).all())
# TimeSeries
ts = self.tsframe['A']
added = self.tsframe + ts
for key, col in self.tsframe.iteritems():
assert_series_equal(added[key], col + ts)
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame + ts
self.assert_(smaller_added.index.equals(self.tsframe.index))
# length 0
result = self.tsframe + ts[:0]
# Frame is length 0
result = self.tsframe[:0] + ts
self.assertEqual(len(result), 0)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame * ts
self.assertEqual(len(result), len(ts))
def test_combineFunc(self):
result = self.frame * 2
self.assert_(np.array_equal(result.values, self.frame.values * 2))
result = self.empty * 2
self.assert_(result.index is self.empty.index)
self.assertEqual(len(result.columns), 0)
def test_toCSV_fromcsv(self):
path = '__tmp__'
self.frame['A'][:5] = np.NaN
self.frame.toCSV(path)
self.frame.toCSV(path, cols=['A', 'B'])
self.frame.toCSV(path, header=False)
self.frame.toCSV(path, index=False)
# test roundtrip
self.tsframe.toCSV(path)
recons = self.klass.fromcsv(path)
assert_frame_equal(self.tsframe, recons)
os.remove(path)
def test_toDataMatrix(self):
dm = self.frame.toDataMatrix()
def test_info(self):
pass
def test_rows(self):
self.assert_(self.tsframe.rows() is self.tsframe.index)
def test_cols(self):
self.assert_(self.tsframe.cols() == list(self.tsframe.columns))
def test_columns(self):
pass
def test_iteritems(self):
pass
def test_append(self):
begin_index = self.frame.index[:5]
end_index = self.frame.index[5:]
begin_frame = self.frame.reindex(begin_index)
end_frame = self.frame.reindex(end_index)
appended = begin_frame.append(end_frame)
assert_almost_equal(appended['A'], self.frame['A'])
del end_frame['A']
partial_appended = begin_frame.append(end_frame)
self.assert_('A' in partial_appended)
partial_appended = end_frame.append(begin_frame)
self.assert_('A' in partial_appended)
# mixed type handling
appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
assert_frame_equal(appended, self.mixed_frame)
# what to test here
mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
assert_frame_equal(mixed_appended, mixed_appended2)
# append empty
appended = self.frame.append(self.empty)
assert_frame_equal(self.frame, appended)
self.assert_(appended is not self.frame)
appended = self.empty.append(self.frame)
assert_frame_equal(self.frame, appended)
self.assert_(appended is not self.frame)
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('EOM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('WEEKDAY', fillMethod='pad')
def test_asMatrix(self):
frame = self.frame
mat = frame.asMatrix()
smallerCols = ['C', 'A']
frameCols = frame.cols()
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
self.assert_(np.isnan(frame[col][i]))
else:
self.assertEqual(value, frame[col][i])
# mixed type
mat = self.mixed_frame.asMatrix(['foo', 'A'])
self.assertEqual(mat[0, 0], 'bar')
def test_values(self):
pass
def test_deepcopy(self):
cp = deepcopy(self.frame)
series = cp['A']
series[:] = 10
for idx, value in series.iteritems():
self.assertNotEqual(self.frame['A'][idx], value)
def test_copy(self):
pass
def test_corr(self):
self.frame['A'][:5] = np.NaN
self.frame['B'][:10] = np.NaN
correls = self.frame.corr()
assert_almost_equal(correls['A']['C'],
self.frame['A'].corr(self.frame['C']))
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = np.NaN
frame = self.klass({'foo' : mat}, index=self.frame.index)
smaller_frame = frame.dropEmptyRows()
self.assert_(np.array_equal(smaller_frame['foo'], mat[5:]))
smaller_frame = frame.dropEmptyRows(['foo'])
self.assert_(np.array_equal(smaller_frame['foo'], mat[5:]))
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = np.NaN
frame = self.klass({'foo' : mat}, index=self.frame.index)
frame['bar'] = 5
smaller_frame = frame.dropIncompleteRows()
self.assert_(np.array_equal(smaller_frame['foo'], mat[5:]))
samesize_frame = frame.dropIncompleteRows(specificColumns=['bar'])
self.assert_(samesize_frame.index.equals(self.frame.index))
def test_fill(self):
self.tsframe['A'][:5] = np.NaN
self.tsframe['A'][-5:] = np.NaN
zero_filled = self.tsframe.fill(0)
self.assert_((zero_filled['A'][:5] == 0).all())
padded = self.tsframe.fill(method='pad')
self.assert_(np.isnan(padded['A'][:5]).all())
self.assert_((padded['A'][-5:] == padded['A'][-5]).all())
# mixed type
self.mixed_frame['foo'][5:20] = np.NaN
self.mixed_frame['A'][-10:] = np.NaN
result = self.mixed_frame.fill(value=0)
def test_truncate(self):
offset = datetools.bday
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
def test_getXS(self):
idx = self.frame.index[5]
xs = self.frame.getXS(idx)
for item, value in xs.iteritems():
if np.isnan(value):
self.assert_(np.isnan(self.frame[item][idx]))
else:
self.assertEqual(value, self.frame[item][idx])
# mixed-type getXS
test_data = {
'A' : {'1' : 1, '2' : 2},
'B' : {'1' : '1', '2' : '2', '3' : '3'},
}
frame = self.klass(test_data)
xs = frame.getXS('1')
self.assert_(xs.dtype == np.object_)
self.assertEqual(xs['A'], 1)
self.assertEqual(xs['B'], '1')
self.assertRaises(Exception, self.tsframe.getXS,
self.tsframe.index[0] - datetools.bday)
def test_pivot(self):
data = {
'index' : ['A', 'B', 'C', 'C', 'B', 'A'],
'columns' : ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values' : [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(index='index', columns='columns', values='values')
expected = DataFrame({
'One' : {'A' : 1., 'B' : 2., 'C' : 3.},
'Two' : {'A' : 1., 'B' : 2., 'C' : 3.}
})
assert_frame_equal(pivoted, expected)
# corner cases
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.cols():
for idx, val in newFrame[col].iteritems():
if idx in self.frame.index:
if np.isnan(val):
self.assert_(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assert_(np.isnan(val))
for col, series in newFrame.iteritems():
self.assert_(common.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assert_(len(emptyFrame.index) == 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.cols():
for idx, val in nonContigFrame[col].iteritems():
if idx in self.frame.index:
if np.isnan(val):
self.assert_(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assert_(np.isnan(val))
for col, series in nonContigFrame.iteritems():
self.assert_(common.equalContents(series.index, nonContigFrame.index))
# corner cases
# Same index, copies values
newFrame = self.frame.reindex(self.frame.index)
self.assert_(newFrame.index is self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assert_(not newFrame)
self.assertEqual(len(newFrame.cols()), len(self.frame.cols()))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_(newFrame.index.equals(self.ts1.index))
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assert_(smaller['A'].dtype == np.int_)
bigger = smaller.reindex(self.intframe.index)
self.assert_(bigger['A'].dtype == np.float_)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assert_(smaller['A'].dtype == np.int_)
def test_rename(self):
mapping = {
'A' : 'a',
'B' : 'b',
'C' : 'c',
'D' : 'd'
}
bad_mapping = {
'A' : 'a',
'B' : 'b',
'C' : 'b',
'D' : 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame)
self.assertRaises(Exception, self.frame.rename,
columns=bad_mapping)
# index
data = {
'A' : {'foo' : 0, 'bar' : 1}
}
# gets sorted alphabetical
df = self.klass(data)
renamed = df.rename(index={'foo' : 'bar', 'bar' : 'foo'})
self.assert_(np.array_equal(renamed.index, ['foo', 'bar']))
renamed = df.rename(index=str.upper)
self.assert_(np.array_equal(renamed.index, ['BAR', 'FOO']))
# have to pass something
self.assertRaises(Exception, self.frame.rename)
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assert_(np.isnan(newFrame['E']).all())
self.assert_('C' not in newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assert_(not newFrame)
def test_reindex_mixed(self):
pass
def test_transpose(self):
frame = self.frame
dft = frame.T
for idx, series in dft.iteritems():
for col, value in series.iteritems():
if np.isnan(value):
self.assert_(np.isnan(frame[col][idx]))
else:
self.assertEqual(value, frame[col][idx])
# mixed type
index, data = common.getMixedTypeDict()
mixed = self.klass(data, index=index)
mixed_T = mixed.T
for col, s in mixed_T.iteritems():
self.assert_(s.dtype == np.object_)
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assert_(shiftedFrame.index.equals(self.tsframe.index))
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assert_(shiftedFrame.index.equals(self.tsframe.index))
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, offset=datetools.BDay())
self.assert_(len(shiftedFrame) == len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, timeRule='WEEKDAY')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + datetools.BDay(5)
assert_series_equal(self.tsframe.getXS(d),
shiftedFrame.getXS(shifted_d))
def test_apply(self):
# ufunc
applied = self.frame.apply(np.sqrt)
assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
self.assertEqual(applied['A'], np.mean(self.frame['A']))
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
self.assertEqual(applied[d], np.mean(self.frame.getXS(d)))
self.assert_(applied.index is self.frame.index) # want this
# empty
applied = self.empty.apply(np.sqrt)
self.assert_(not applied)
applied = self.empty.apply(np.mean)
self.assert_(not applied)
def test_tapply(self):
d = self.frame.index[0]
tapplied = self.frame.tapply(np.mean)
self.assertEqual(tapplied[d], np.mean(self.frame.getXS(d)))
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
assert_frame_equal(applied, self.frame * 2)
result = self.frame.applymap(type)
def test_groupby(self):
grouped = self.tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), 5)
self.assertEqual(len(aggregated.cols()), 4)
# by string
tscopy = self.tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
del stragged['weekday']
assert_frame_equal(stragged, aggregated)
# transform
transformed = grouped.transform(lambda x: x - x.mean())
self.assertEqual(len(transformed), 30)
self.assertEqual(len(transformed.cols()), 4)
# iterate
for weekday, group in grouped:
self.assert_(group.index[0].weekday() == weekday)
# groups / group_indices
groups = grouped.groups
indices = grouped.group_indices
for k, v in groups.iteritems():
samething = self.tsframe.index.take(indices[k])
self.assert_(np.array_equal(v, samething))
def test_groupby_columns(self):
mapping = {
'A' : 0, 'B' : 0, 'C' : 1, 'D' : 1
}
grouped = self.tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), len(self.tsframe))
self.assertEqual(len(aggregated.cols()), 2)
# iterate
for k, v in grouped:
self.assertEqual(len(v.cols()), 2)
# tgroupby
grouping = {
'A' : 0,
'B' : 1,
'C' : 0,
'D' : 1
}
grouped = self.frame.tgroupby(grouping.get, np.mean)
self.assertEqual(len(grouped), len(self.frame.index))
self.assertEqual(len(grouped.cols()), 2)
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.cols()), 2)
self.assert_('E' not in filtered)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.cols()), 2)
self.assert_('AA' in filtered)
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.cols()), 2)
self.assert_('AA' in filtered)
# pass in None
self.assertRaises(Exception, self.frame.filter, items=None)
def test_sort(self):
# what to test?
sorted = self.frame.sort()
sorted_A = self.frame.sort(column='A')
sorted = self.frame.sort(ascending=False)
sorted_A = self.frame.sort(column='A', ascending=False)
def test_combineFirst(self):
# disjoint
head, tail = self.frame[:5], self.frame[5:]
combined = head.combineFirst(tail)
reordered_frame = self.frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
self.assert_(common.equalContents(combined.cols(), self.frame.cols()))
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
fcopy = self.frame.copy()
fcopy['A'] = 1
del fcopy['C']
fcopy2 = self.frame.copy()
fcopy2['B'] = 0
del fcopy2['D']
combined = fcopy.combineFirst(fcopy2)
self.assert_((combined['A'] == 1).all())
assert_series_equal(combined['B'], fcopy['B'])
assert_series_equal(combined['C'], fcopy2['C'])
assert_series_equal(combined['D'], fcopy['D'])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head['A'] = 1
combined = head.combineFirst(tail)
self.assert_((combined['A'][:10] == 1).all())
# reverse overlap
tail['A'][:10] = 0
combined = tail.combineFirst(head)
self.assert_((combined['A'][:10] == 0).all())
# no overlap
f = self.frame[:10]
g = self.frame[10:]
combined = f.combineFirst(g)
assert_series_equal(combined['A'].reindex(f.index), f['A'])
assert_series_equal(combined['A'].reindex(g.index), g['A'])
# corner cases
comb = self.frame.combineFirst(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineFirst(self.frame)
assert_frame_equal(comb, self.frame)
def test_combineAdd(self):
# trivial
comb = self.frame.combineAdd(self.frame)
assert_frame_equal(comb, self.frame * 2)
# corner cases
comb = self.frame.combineAdd(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineAdd(self.frame)
assert_frame_equal(comb, self.frame)
def test_combineMult(self):
# trivial
comb = self.frame.combineMult(self.frame)
assert_frame_equal(comb, self.frame ** 2)
# corner cases
comb = self.frame.combineMult(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineMult(self.frame)
assert_frame_equal(comb, self.frame)
def test_join_index(self):
# left / right
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2)
self.assert_(f.index.equals(joined.index))
self.assertEqual(len(joined.cols()), 4)
joined = f.join(f2, how='left')
self.assert_(joined.index.equals(f.index))
self.assertEqual(len(joined.cols()), 4)
joined = f.join(f2, how='right')
self.assert_(joined.index.equals(f2.index))
self.assertEqual(len(joined.cols()), 4)
# corner case
self.assertRaises(Exception, self.frame.join, self.frame,
how='left')
# inner
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='inner')
self.assert_(joined.index.equals(f.index.intersection(f2.index)))
self.assertEqual(len(joined.cols()), 4)
# corner case
self.assertRaises(Exception, self.frame.join, self.frame,
how='inner')
# outer
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='outer')
self.assert_(common.equalContents(self.frame.index, joined.index))
self.assertEqual(len(joined.cols()), 4)
# corner case
self.assertRaises(Exception, self.frame.join, self.frame,
how='outer')
self.assertRaises(Exception, f.join, f2, how='foo')
def test_join(self):
index, data = common.getMixedTypeDict()
target = self.klass(data, index=index)
# Join on string value
source = self.klass({'MergedA' : data['A'], 'MergedD' : data['D']},
index=data['C'])
merged = target.join(source, on='C')
self.assert_(np.array_equal(merged['MergedA'], target['A']))
self.assert_(np.array_equal(merged['MergedD'], target['D']))
# Test when some are missing
# merge column not p resent
self.assertRaises(Exception, target.join, source, on='E')
# corner cases
# nothing to merge
merged = target.join(source.reindex([]), on='C')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
self.assertRaises(Exception, target.join, source_copy, on='A')
# can't specify how
self.assertRaises(Exception, target.join, source, on='C',
how='left')
def test_clip(self):
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
self.assert_(not (capped.values > median).any())
floored = self.frame.clip_lower(median)
self.assert_(not (floored.values < median).any())
def test_statistics(self):
sumFrame = self.frame.apply(np.sum)
for col, series in self.frame.iteritems():
self.assertEqual(sumFrame[col], series.sum())
def _check_statistic(self, frame, name, alternative):
f = getattr(frame, name)
result = f(axis=0)
assert_series_equal(result, frame.apply(alternative))
result = f(axis=1)
comp = frame.apply(alternative, axis=1).reindex(result.index)
assert_series_equal(result, comp)
self.assertRaises(Exception, f, axis=2)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_statistic(self.frame, 'count', f)
def test_sum(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].sum()
self._check_statistic(self.frame, 'sum', f)
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = self.klass(values, index=self.frame.index,
columns=self.frame.cols())
deltas = frame * timedelta(1)
deltas.sum()
def test_product(self):
def f(x):
x = np.asarray(x)
return np.prod(x[notnull(x)])
self._check_statistic(self.frame, 'product', f)
def test_mean(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].mean()
self._check_statistic(self.frame, 'mean', f)
def test_median(self):
def f(x):
x = np.asarray(x)
return np.median(x[notnull(x)])
self._check_statistic(self.frame, 'median', f)
def test_min(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].min()
self._check_statistic(self.frame, 'min', f)
def test_max(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].max()
self._check_statistic(self.frame, 'max', f)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_statistic(self.frame, 'mad', f)
def test_var(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].var(ddof=1)
self._check_statistic(self.frame, 'var', f)
def test_std(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].std(ddof=1)
self._check_statistic(self.frame, 'std', f)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
return
def f(x):
x = np.asarray(x)
return skew(x[notnull(x)], bias=False)
self._check_statistic(self.frame, 'skew', f)
def test_cumsum(self):
cumsum = self.tsframe.cumsum()
assert_series_equal(cumsum['A'], np.cumsum(self.tsframe['A'].fill(0)))
df = self.klass({'A' : np.arange(20)}, index=np.arange(20))
# works
result = df.cumsum()
# fix issue
cumsum_xs = self.tsframe.cumsum(axis=1)
self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
def test_cumprod(self):
cumprod = self.tsframe.cumprod()
assert_series_equal(cumprod['A'],
np.cumprod(self.tsframe['A'].fill(1)))
# fix issue
cumprod_xs = self.tsframe.cumprod(axis=1)
self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
if __name__ == '__main__':
unittest.main()
|
|
"""
Support for Google travel time sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.google_travel_time/
"""
from datetime import datetime
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
CONF_API_KEY, CONF_NAME, EVENT_HOMEASSISTANT_START, ATTR_LATITUDE,
ATTR_LONGITUDE)
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.location as location
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['googlemaps==2.4.4']
_LOGGER = logging.getLogger(__name__)
CONF_DESTINATION = 'destination'
CONF_MODE = 'mode'
CONF_OPTIONS = 'options'
CONF_ORIGIN = 'origin'
CONF_TRAVEL_MODE = 'travel_mode'
DEFAULT_NAME = 'Google Travel Time'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
ALL_LANGUAGES = ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es',
'eu', 'fa', 'fi', 'fr', 'gl', 'gu', 'hi', 'hr', 'hu', 'id',
'it', 'iw', 'ja', 'kn', 'ko', 'lt', 'lv', 'ml', 'mr', 'nl',
'no', 'pl', 'pt', 'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl',
'sr', 'sv', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'vi',
'zh-CN', 'zh-TW']
AVOID = ['tolls', 'highways', 'ferries', 'indoor']
TRANSIT_PREFS = ['less_walking', 'fewer_transfers']
TRANSPORT_TYPE = ['bus', 'subway', 'train', 'tram', 'rail']
TRAVEL_MODE = ['driving', 'walking', 'bicycling', 'transit']
TRAVEL_MODEL = ['best_guess', 'pessimistic', 'optimistic']
UNITS = ['metric', 'imperial']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: 'driving'}): vol.All(
dict, vol.Schema({
vol.Optional(CONF_MODE, default='driving'): vol.In(TRAVEL_MODE),
vol.Optional('language'): vol.In(ALL_LANGUAGES),
vol.Optional('avoid'): vol.In(AVOID),
vol.Optional('units'): vol.In(UNITS),
vol.Exclusive('arrival_time', 'time'): cv.string,
vol.Exclusive('departure_time', 'time'): cv.string,
vol.Optional('traffic_model'): vol.In(TRAVEL_MODEL),
vol.Optional('transit_mode'): vol.In(TRANSPORT_TYPE),
vol.Optional('transit_routing_preference'): vol.In(TRANSIT_PREFS)
}))
})
TRACKABLE_DOMAINS = ['device_tracker', 'sensor', 'zone']
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(dt_util.start_of_local_day(),
dt_util.parse_time(timestr))
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the Google travel time platform."""
# pylint: disable=too-many-locals
def run_setup(event):
"""Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
options = config.get(CONF_OPTIONS)
if options.get('units') is None:
options['units'] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = ("Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!")
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = "{} - {}".format(DEFAULT_NAME, titled_mode)
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(hass, name, api_key, origin,
destination, options)
if sensor.valid_api_connection:
add_devices_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
# pylint: disable=too-many-instance-attributes
class GoogleTravelTimeSensor(Entity):
"""Representation of a Google travel time sensor."""
# pylint: disable=too-many-arguments
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = 'min'
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
import googlemaps
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
_LOGGER .error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix['rows'][0]['elements'][0]
if 'duration_in_traffic' in _data:
return round(_data['duration_in_traffic']['value']/60)
if 'duration' in _data:
return round(_data['duration']['value']/60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res['rows']
_data = self._matrix['rows'][0]['elements'][0]
if 'duration_in_traffic' in _data:
res['duration_in_traffic'] = _data['duration_in_traffic']['text']
if 'duration' in _data:
res['duration'] = _data['duration']['text']
if 'distance' in _data:
res['distance'] = _data['distance']['text']
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get('departure_time')
atime = options_copy.get('arrival_time')
if dtime is not None and ':' in dtime:
options_copy['departure_time'] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy['departure_time'] = dtime
elif atime is None:
options_copy['departure_time'] = 'now'
if atime is not None and ':' in atime:
options_copy['arrival_time'] = convert_time_to_utc(atime)
elif atime is not None:
options_copy['arrival_time'] = atime
# Convert device_trackers to google friendly location
if hasattr(self, '_origin_entity_id'):
self._origin = self._get_location_from_entity(
self._origin_entity_id
)
if hasattr(self, '_destination_entity_id'):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(self._origin,
self._destination,
**options_copy)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location.",
entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
def _get_location_from_attributes(entity):
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return "%s,%s" % (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == 'zone' and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
|
|
"""Page CMS page_tags template tags"""
from django import template
from django.utils.safestring import SafeText
from django.template import TemplateSyntaxError
from django.conf import settings
from django.utils.text import unescape_string_literal
from django import forms
from django.template.loader import get_template
try:
from django.templatetags.static import static
except ImportError:
from django.contrib.staticfiles.templatetags.staticfiles import static
from pages import settings as pages_settings
from pages.models import Content, Page
from pages.placeholders import (
PlaceholderNode, ImagePlaceholderNode, FilePlaceholderNode
)
from pages.placeholders import ContactPlaceholderNode, MarkdownPlaceholderNode
from pages.placeholders import JsonPlaceholderNode, parse_placeholder
import urllib
from pages.utils import get_placeholders
register = template.Library()
def get_page_from_string_or_id(page_string, lang=None):
"""Return a Page object from a slug or an id."""
if type(page_string) == int:
return Page.objects.get(pk=int(page_string))
# if we have a string coming from some templates templates
if (isinstance(page_string, SafeText) or
isinstance(page_string, str)):
if page_string.isdigit():
return Page.objects.get(pk=int(page_string))
return Page.objects.from_path(page_string, lang)
# in any other case we return the input becasue it's probably
# a Page object.
return page_string
def _get_content(context, page, content_type, lang, fallback=True):
"""Helper function used by ``PlaceholderNode``."""
if not page:
return ''
if not lang and 'lang' in context:
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if not page:
return ''
content = Content.objects.get_content(page, lang, content_type, fallback)
return content
"""Filters"""
def has_content_in(page, language):
"""Fitler that return ``True`` if the page has any content in a
particular language.
:param page: the current page
:param language: the language you want to look at
"""
if page is None:
return False
return Content.objects.filter(page=page, language=language).count() > 0
register.filter(has_content_in)
"""Inclusion tags"""
def pages_menu(context, page, url='/'):
"""Render a nested list of all the descendents of the given page,
including this page.
:param page: the page where to start the menu from.
:param url: not used anymore.
"""
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if page:
children = page.get_children_for_frontend()
context.update({'children': children, 'page': page})
return context
pages_menu = register.inclusion_tag('pages/menu.html',
takes_context=True)(pages_menu)
def pages_sub_menu(context, page, url='/'):
"""Get the root page of the given page and
render a nested list of all root's children pages.
Good for rendering a secondary menu.
:param page: the page where to start the menu from.
:param url: not used anymore.
"""
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if page:
root = page.get_root()
children = root.get_children_for_frontend()
context.update({'children': children, 'page': page})
return context
pages_sub_menu = register.inclusion_tag('pages/sub_menu.html',
takes_context=True)(pages_sub_menu)
def pages_siblings_menu(context, page, url='/'):
"""Get the parent page of the given page and render a nested list of its
child pages. Good for rendering a secondary menu.
:param page: the page where to start the menu from.
:param url: not used anymore.
"""
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if page:
siblings = page.get_siblings()
context.update({'children': siblings, 'page': page})
return context
pages_siblings_menu = register.inclusion_tag(
'pages/sub_menu.html',
takes_context=True)(pages_siblings_menu)
def pages_admin_menu(context, page):
"""Render the admin table of pages."""
request = context.get('request', None)
expanded = False
has_query = 'q' in context and context['q']
if request and "tree_expanded" in request.COOKIES and not has_query:
cookie_string = urllib.parse.unquote(request.COOKIES['tree_expanded'])
if cookie_string:
ids = [
int(id) for id in
urllib.parse.unquote(
request.COOKIES['tree_expanded']).split(',')
]
if page.id in ids:
expanded = True
context.update({'expanded': expanded, 'page': page})
return context
pages_admin_menu = register.inclusion_tag(
'admin/pages/page/menu.html', takes_context=True
)(pages_admin_menu)
def show_content(context, page, content_type, lang=None, fallback=True):
"""Display a content type from a page.
Example::
{% show_content page_object "title" %}
You can also use the slug of a page::
{% show_content "my-page-slug" "title" %}
Or even the id of a page::
{% show_content 10 "title" %}
:param page: the page object, slug or id
:param content_type: content_type used by a placeholder
:param lang: the wanted language
(default None, use the request object to know)
:param fallback: use fallback content from other language
"""
return {'content': _get_content(
context, page, content_type, lang, fallback)
}
show_content = register.inclusion_tag('pages/content.html',
takes_context=True)(show_content)
def show_absolute_url(context, page, lang=None):
"""
Show the url of a page in the right language
Example ::
{% show_absolute_url page_object %}
You can also use the slug of a page::
{% show_absolute_url "my-page-slug" %}
Keyword arguments:
:param page: the page object, slug or id
:param lang: the wanted language \
(defaults to `settings.PAGE_DEFAULT_LANGUAGE`)
"""
if not lang:
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if not page:
return {'content': ''}
url = page.get_url_path(language=lang)
if url:
return {'content': url}
return {'content': ''}
show_absolute_url = register.inclusion_tag(
'pages/content.html',
takes_context=True)(show_absolute_url)
def show_revisions(context, page, content_type, lang=None):
"""Render the last 10 revisions of a page content with a list using
the ``pages/revisions.html`` template"""
if (not pages_settings.PAGE_CONTENT_REVISION or
content_type in pages_settings.PAGE_CONTENT_REVISION_EXCLUDE_LIST):
return {'revisions': None}
revisions = Content.objects.filter(
page=page, language=lang,
type=content_type).order_by('-creation_date')
if len(revisions) < 2:
return {'revisions': None}
return {'revisions': revisions[0:10]}
show_revisions = register.inclusion_tag(
'pages/revisions.html',
takes_context=True)(show_revisions)
def pages_dynamic_tree_menu(context, page, url='/'):
"""
Render a "dynamic" tree menu, with all nodes expanded which are either
ancestors or the current page itself.
Override ``pages/dynamic_tree_menu.html`` if you want to change the
design.
:param page: the current page
:param url: not used anymore
"""
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
children = None
if page and 'current_page' in context:
current_page = context['current_page']
# if this node is expanded, we also have to render its children
# a node is expanded if it is the current node or one of its ancestors
if(
page.tree_id == current_page.tree_id and
page.lft <= current_page.lft and
page.rght >= current_page.rght
):
children = page.get_children_for_frontend()
context.update({'children': children, 'page': page})
return context
pages_dynamic_tree_menu = register.inclusion_tag(
'pages/dynamic_tree_menu.html',
takes_context=True
)(pages_dynamic_tree_menu)
def pages_breadcrumb(context, page, url='/'):
"""
Render a breadcrumb like menu.
Override ``pages/breadcrumb.html`` if you want to change the
design.
:param page: the current page
:param url: not used anymore
"""
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
pages_navigation = None
if page:
pages_navigation = page.get_ancestors()
context.update({'pages_navigation': pages_navigation, 'page': page})
return context
pages_breadcrumb = register.inclusion_tag(
'pages/breadcrumb.html',
takes_context=True
)(pages_breadcrumb)
"""Tags"""
class GetPageNode(template.Node):
"""get_page Node"""
def __init__(self, page_filter, varname):
self.page_filter = page_filter
self.varname = varname
def render(self, context):
page_or_id = self.page_filter.resolve(context)
page = get_page_from_string_or_id(page_or_id)
context[self.varname] = page
return ''
def do_get_page(parser, token):
"""Retrieve a page and insert into the template's context.
Example::
{% get_page "news" as news_page %}
:param page: the page object, slug or id
:param name: name of the context variable to store the page in
"""
bits = token.split_contents()
if 4 != len(bits):
raise TemplateSyntaxError('%r expects 4 arguments' % bits[0])
if bits[-2] != 'as':
raise TemplateSyntaxError(
'%r expects "as" as the second argument' % bits[0])
page_filter = parser.compile_filter(bits[1])
varname = bits[-1]
return GetPageNode(page_filter, varname)
do_get_page = register.tag('get_page', do_get_page)
class GetContentNode(template.Node):
"""Get content node"""
def __init__(self, page, content_type, varname, lang, lang_filter):
self.page = page
self.content_type = content_type
self.varname = varname
self.lang = lang
self.lang_filter = lang_filter
def render(self, context):
if self.lang_filter:
self.lang = self.lang_filter.resolve(context)
context[self.varname] = _get_content(
context,
self.page.resolve(context),
self.content_type.resolve(context),
self.lang
)
return ''
def do_get_content(parser, token):
"""Retrieve a Content object and insert it into the template's context.
Example::
{% get_content page_object "title" as content %}
You can also use the slug of a page::
{% get_content "my-page-slug" "title" as content %}
Syntax::
{% get_content page type [lang] as name %}
:param page: the page object, slug or id
:param type: content_type used by a placeholder
:param name: name of the context variable to store the content in
:param lang: the wanted language
"""
bits = token.split_contents()
if not 5 <= len(bits) <= 6:
raise TemplateSyntaxError('%r expects 4 or 5 arguments' % bits[0])
if bits[-2] != 'as':
raise TemplateSyntaxError(
'%r expects "as" as the second last argument' % bits[0])
page = parser.compile_filter(bits[1])
content_type = parser.compile_filter(bits[2])
varname = bits[-1]
lang = None
lang_filter = None
if len(bits) == 6:
lang = bits[3]
else:
lang_filter = parser.compile_filter("lang")
return GetContentNode(page, content_type, varname, lang, lang_filter)
do_get_content = register.tag('get_content', do_get_content)
class LoadPagesNode(template.Node):
"""Load page node."""
def render(self, context):
if 'pages_navigation' not in context:
pages = Page.objects.navigation().order_by("tree_id")
context.update({'pages_navigation': pages})
if 'current_page' not in context:
context.update({'current_page': None})
return ''
def do_load_pages(parser, token):
"""Load the navigation pages, lang, and current_page variables into the
current context.
Example::
<ul>
{% load_pages %}
{% for page in pages_navigation %}
{% pages_menu page %}
{% endfor %}
</ul>
"""
return LoadPagesNode()
do_load_pages = register.tag('load_pages', do_load_pages)
class LoadEditNode(template.Node):
"""Load edit node."""
def render(self, context):
request = context.get('request')
if not request.user.is_staff:
return ''
template_name = context.get('template_name')
placeholders = get_placeholders(template_name)
page = context.get('current_page')
if not page:
return ''
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
form = forms.Form()
for p in placeholders:
field = p.get_field(
page, lang, initial=p.get_content_from_context(context))
form.fields[p.name] = field
template = get_template('pages/inline-edit.html')
with context.push():
context['form'] = form
context['edit_enabled'] = request.COOKIES.get('enable_edit_mode')
content = template.render(context.flatten())
return content
def do_load_edit(parser, token):
"""
"""
return LoadEditNode()
do_load_edit = register.tag('pages_edit_init', do_load_edit)
class LoadEditMediaNode(template.Node):
"""Load edit node."""
def render(self, context):
request = context.get('request')
if not request.user.is_staff:
return ''
template_name = context.get('template_name')
placeholders = get_placeholders(template_name)
page = context.get('current_page')
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
form = forms.Form()
for p in placeholders:
field = p.get_field(page, lang)
form.fields[p.name] = field
link = '<link href="{}" type="text/css" media="all" rel="stylesheet" />'.format(
static('pages/css/inline-edit.css')
)
return "{}{}".format(form.media, link)
def do_load_edit_media(parser, token):
"""
"""
return LoadEditMediaNode()
do_load_edit = register.tag('pages_edit_media', do_load_edit_media)
def do_placeholder(parser, token):
"""
Method that parse the placeholder template tag.
Syntax::
{% placeholder <name> [on <page>] [with <widget>] \
[parsed] [as <varname>] %}
Example usage::
{% placeholder about %}
{% placeholder body with TextArea as body_text %}
{% placeholder welcome with TextArea parsed as welcome_text %}
{% placeholder teaser on next_page with TextArea parsed %}
"""
name, params = parse_placeholder(parser, token)
return PlaceholderNode(name, **params)
register.tag('placeholder', do_placeholder)
def do_markdownlaceholder(parser, token):
"""
Method that parse the markdownplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return MarkdownPlaceholderNode(name, **params)
register.tag('markdownplaceholder', do_markdownlaceholder)
def do_imageplaceholder(parser, token):
"""
Method that parse the imageplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return ImagePlaceholderNode(name, **params)
register.tag('imageplaceholder', do_imageplaceholder)
def do_fileplaceholder(parser, token):
"""
Method that parse the fileplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return FilePlaceholderNode(name, **params)
register.tag('fileplaceholder', do_fileplaceholder)
def do_contactplaceholder(parser, token):
"""
Method that parse the contactplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return ContactPlaceholderNode(name, **params)
register.tag('contactplaceholder', do_contactplaceholder)
def do_jsonplaceholder(parser, token):
"""
Method that parse the contactplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return JsonPlaceholderNode(name, **params)
register.tag('jsonplaceholder', do_jsonplaceholder)
def language_content_up_to_date(page, language):
"""Tell if all the page content has been updated since the last
change of the official version (settings.LANGUAGE_CODE)
This is approximated by comparing the last modified date of any
content in the page, not comparing each content block to its
corresponding official language version. That allows users to
easily make "do nothing" changes to any content block when no
change is required for a language.
"""
lang_code = getattr(settings, 'LANGUAGE_CODE', None)
if lang_code == language:
# official version is always "up to date"
return True
# get the last modified date for the official version
last_modified = Content.objects.filter(
language=lang_code,
page=page).order_by('-creation_date')
if not last_modified:
# no official version
return True
lang_modified = Content.objects.filter(
language=language,
page=page).order_by('-creation_date')[0].creation_date
return lang_modified > last_modified[0].creation_date
register.filter(language_content_up_to_date)
def do_page_has_content(parser, token):
"""
Conditional tag that only renders its nodes if the page
has content for a particular content type. By default the
current page is used.
Syntax::
{% page_has_content <content_type> [<page var name>] %}
...
{%_end page_has_content %}
Example use::
{% page_has_content 'header-image' %}
<img src="{{ MEDIA_URL }}{% imageplaceholder 'header-image' %}">
{% end_page_has_content %}
"""
nodelist = parser.parse(('end_page_has_content',))
parser.delete_first_token()
args = token.split_contents()
try:
content_type = unescape_string_literal(args[1])
except IndexError:
raise template.TemplateSyntaxError(
"%r tag requires the argument content_type" % args[0]
)
if len(args) > 2:
page = args[2]
else:
page = None
return PageHasContentNode(page, content_type, nodelist)
register.tag('page_has_content', do_page_has_content)
class PageHasContentNode(template.Node):
def __init__(self, page, content_type, nodelist):
self.page = page or 'current_page'
self.content_type = content_type
self.nodelist = nodelist
def render(self, context):
page = context.get(self.page)
if not page:
return ''
content = page.get_content(
context.get('lang', None), self.content_type)
if(content):
output = self.nodelist.render(context)
return output
return ''
|
|
# ext/associationproxy.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import itertools
import operator
import weakref
from .. import exc, orm, util
from ..orm import collections, interfaces
from ..sql import not_, or_
def association_proxy(target_collection, attr, **kw):
"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
target.
The returned value is an instance of :class:`.AssociationProxy`.
Implements a Python property representing a relationship as a collection
of simpler values, or a scalar value. The proxied property will mimic
the collection type of the target (list, dict or set), or, in the case of
a one to one relationship, a simple scalar value.
:param target_collection: Name of the attribute we'll proxy to.
This attribute is typically mapped by
:func:`~sqlalchemy.orm.relationship` to link to a target collection, but
can also be a many-to-one or non-scalar relationship.
:param attr: Attribute on the associated instance or instances we'll
proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then
simply: getattr(obj, *attr*)
:param creator: optional.
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For list
and set collections, the target class constructor will be called with
the 'value' for the new instance. For dict types, two arguments are
passed: key and value.
If you want to construct instances differently, supply a *creator*
function that takes arguments as above and returns instances.
For scalar relationships, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set
up multiple association proxies mapping to different attributes. See
the unit tests for examples, and for examples of how creator() functions
can be used to construct the scalar relationship on-demand in this
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`.AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY')
"""Symbol indicating an :class:`_InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`._InspectionAttr.extension_type`
attibute.
"""
class AssociationProxy(interfaces._InspectionAttr):
"""A descriptor that presents a read/write view of an object attribute."""
is_attribute = False
extension_type = ASSOCIATION_PROXY
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None,
proxy_bulk_set=None):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`.relationship`.
:param attr: Attribute on the collected instances we'll proxy
for. For example, given a target collection of [obj1, obj2], a
list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
:param creator: Optional. When new items are added to this proxied
collection, new instances of the class collected by the target
collection will be created. For list and set collections, the
target class constructor will be called with the 'value' for the
new instance. For dict types, two arguments are passed:
key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
:param getset_factory: Optional. Proxied attribute access is
automatically handled by routines that get and set values based on
the `attr` argument for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
:param proxy_factory: Optional. The type of collection to emulate is
determined by sniffing the target collection. If your collection
type can't be determined by duck typing or you'd like to use a
different collection implementation, you may supply a factory
function to produce those collections. Only applicable to
non-scalar relationships.
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.owning_class = None
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
@property
def remote_attr(self):
"""The 'remote' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.local_attr`
"""
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
"""The 'local' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.remote_attr`
"""
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
using :meth:`.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.local_attr`
:attr:`.AssociationProxy.remote_attr`
"""
return (self.local_attr, self.remote_attr)
def _get_property(self):
return (orm.class_mapper(self.owning_class).
get_property(self.target_collection))
@util.memoized_property
def target_class(self):
"""The intermediary class handled by this :class:`.AssociationProxy`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
return self._get_property().mapper.class_
@util.memoized_property
def scalar(self):
"""Return ``True`` if this :class:`.AssociationProxy` proxies a scalar
relationship on the local side."""
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return not self._get_property().\
mapper.get_property(self.value_attr).uselist
@util.memoized_property
def _target_is_object(self):
return getattr(self.target_class, self.value_attr).impl.uses_objects
def __get__(self, obj, class_):
if self.owning_class is None:
self.owning_class = class_ and class_ or type(obj)
if obj is None:
return self
if self.scalar:
return self._scalar_get(getattr(obj, self.target_collection))
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, proxy = getattr(obj, self.key)
if id(obj) == creator_id:
return proxy
except AttributeError:
pass
proxy = self._new(_lazy_collection(obj, self.target_collection))
setattr(obj, self.key, (id(obj), proxy))
return proxy
def __set__(self, obj, values):
if self.owning_class is None:
self.owning_class = type(obj)
if self.scalar:
creator = self.creator and self.creator or self.target_class
target = getattr(obj, self.target_collection)
if target is None:
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
else:
proxy = self.__get__(obj, None)
if proxy is not values:
proxy.clear()
self._set(proxy, values)
def __delete__(self, obj):
if self.owning_class is None:
self.owning_class = type(obj)
delattr(obj, self.key)
def _initialize_scalar_accessors(self):
if self.getset_factory:
get, set = self.getset_factory(None, self)
else:
get, set = self._default_getset(None)
self._scalar_get, self._scalar_set = get, set
def _default_getset(self, collection_class):
attr = self.value_attr
getter = operator.attrgetter(attr)
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
setter = lambda o, v: setattr(o, attr, v)
return getter, setter
def _new(self, lazy_collection):
creator = self.creator and self.creator or self.target_class
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(
lazy_collection, creator, self.value_attr, self)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is dict:
return _AssociationDict(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is set:
return _AssociationSet(
lazy_collection, creator, getter, setter, self)
else:
raise exc.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
(self.collection_class.__name__, self.target_collection))
def _inflate(self, proxy):
creator = self.creator and self.creator or self.target_class
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _set(self, proxy, values):
if self.proxy_bulk_set:
self.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exc.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
@property
def _comparator(self):
return self._get_property().comparator
def any(self, criterion=None, **kwargs):
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._value_is_scalar:
value_expr = getattr(
self.target_class, self.value_attr).has(criterion, **kwargs)
else:
value_expr = getattr(
self.target_class, self.value_attr).any(criterion, **kwargs)
# check _value_is_scalar here, otherwise
# we're scalar->scalar - call .any() so that
# the "can't call any() on a scalar" msg is raised.
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
value_expr
)
else:
return self._comparator.any(
value_expr
)
def has(self, criterion=None, **kwargs):
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._target_is_object:
return self._comparator.has(
getattr(self.target_class, self.value_attr).\
has(criterion, **kwargs)
)
else:
if criterion is not None or kwargs:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use ==")
return self._comparator.has()
def contains(self, obj):
"""Produce a proxied 'contains' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
, :meth:`.RelationshipProperty.Comparator.has`,
and/or :meth:`.RelationshipProperty.Comparator.contains`
operators of the underlying proxied attributes.
"""
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
else:
return self._comparator.any(**{self.value_attr: obj})
def __eq__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
if obj is None:
return or_(
self._comparator.has(**{self.value_attr: obj}),
self._comparator == None
)
else:
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
return self._comparator.has(
getattr(self.target_class, self.value_attr) != obj)
class _lazy_collection(object):
def __init__(self, obj, target):
self.ref = weakref.ref(obj)
self.target = target
def __call__(self):
obj = self.ref()
if obj is None:
raise exc.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, self.target)
def __getstate__(self):
return {'obj': self.ref(), 'target': self.target}
def __setstate__(self, state):
self.ref = weakref.ref(state['obj'])
self.target = state['target']
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
lazy_collection
A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())
creator
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store that
value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __bool__(self):
return bool(self.col)
__nonzero__ = __bool__
def __getstate__(self):
return {'parent': self.parent, 'lazy_collection': self.lazy_collection}
def __setstate__(self, state):
self.parent = state['parent']
self.lazy_collection = state['lazy_collection']
self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __getitem__(self, index):
return self._get(self.col[index])
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
rng = list(range(index.start or 0, stop, step))
if step == 1:
for i in rng:
del self[index.start]
i = index.start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value),
len(rng)))
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
def append(self, value):
item = self._create(value)
self.col.append(item)
def count(self, value):
return sum([1 for _ in
util.itertools_filter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0:len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol('_NotProvided')
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object):
return self.getter(object)
def _set(self, object, key, value):
return self.setter(object, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return iter(self.col.keys())
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [self._get(member) for member in self.col.values()]
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError('update expected at most 1 arguments, got %i' %
len(a))
elif len(a) == 1:
seq_or_map = a[0]
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, 'keys'):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError:
raise ValueError(
"dictionary update sequence "
"requires 2-element tuples")
for key, value in kw:
self[key] = value
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(dict, func_name)):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
def __bool__(self):
if self.col:
return True
else:
return False
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError('pop from an empty set')
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(set, func_name)):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from oslo_utils import timeutils
from senlin.common import exception
from senlin.db.sqlalchemy import api as db_api
from senlin.engine import event as eventm
from senlin.engine import node as nodem
from senlin.profiles import base as profiles_base
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class TestNode(base.SenlinTestCase):
def setUp(self):
super(TestNode, self).setUp()
self.context = utils.dummy_context()
self.profile = self._create_profile('PROFILE_ID')
self.cluster = self._create_cluster('CLUSTER_ID')
def _create_profile(self, profile_id):
values = {
'id': profile_id,
'type': 'os.nova.server-1.0',
'name': 'test-profile',
'spec': {
'type': 'os.nova.server',
'version': '1.0',
}
}
return db_api.profile_create(self.context, values)
def _create_cluster(self, cluster_id):
values = {
'id': cluster_id,
'profile_id': self.profile.id,
'name': 'test-cluster',
'user': self.context.user,
'project': self.context.project,
'next_index': 1,
}
return db_api.cluster_create(self.context, values)
def _create_node(self, node_id):
values = {
'id': node_id,
'profile_id': self.profile.id,
'cluster_id': self.cluster.id,
'project': self.context.project,
'name': 'node1',
'role': 'test_node',
}
return db_api.node_create(self.context, values)
def test_node_init(self):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
role='first_node')
self.assertIsNone(node.id)
self.assertEqual('node1', node.name)
self.assertEqual('', node.physical_id)
self.assertEqual(self.profile.id, node.profile_id)
self.assertEqual('', node.user)
self.assertEqual('', node.project)
self.assertEqual('', node.domain)
self.assertEqual(self.cluster.id, node.cluster_id)
self.assertEqual(-1, node.index)
self.assertEqual('first_node', node.role)
self.assertIsNone(node.init_time)
self.assertIsNone(node.created_time)
self.assertIsNone(node.updated_time)
self.assertIsNone(node.deleted_time)
self.assertEqual('INIT', node.status)
self.assertEqual('Initializing', node.status_reason)
self.assertEqual({}, node.data)
self.assertEqual({}, node.metadata)
self.assertEqual({}, node.rt)
def test_node_init_random_name(self):
node = nodem.Node(None, self.profile.id, None)
self.assertIsNotNone(node.name)
self.assertEqual(13, len(node.name))
@mock.patch.object(eventm, 'info')
def test_node_store_init(self, mock_info):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context, role='first_node')
self.assertIsNone(node.id)
node_id = node.store(self.context)
self.assertIsNotNone(node_id)
node_info = db_api.node_get(self.context, node_id)
self.assertIsNotNone(node_info)
self.assertEqual('node1', node_info.name)
self.assertEqual('', node_info.physical_id)
self.assertEqual(self.cluster.id, node_info.cluster_id)
self.assertEqual(self.profile.id, node_info.profile_id)
self.assertEqual(self.context.user, node_info.user)
self.assertEqual(self.context.project, node_info.project)
self.assertEqual(self.context.domain, node_info.domain)
self.assertEqual(1, node_info.index)
self.assertEqual('first_node', node.role)
self.assertIsNotNone(node_info.init_time)
self.assertIsNone(node_info.created_time)
self.assertIsNone(node_info.updated_time)
self.assertIsNone(node_info.deleted_time)
self.assertEqual('INIT', node_info.status)
self.assertEqual('Initializing', node_info.status_reason)
self.assertEqual({}, node_info.meta_data)
self.assertEqual({}, node_info.data)
mock_info.assert_called_once_with(self.context, node, 'create')
@mock.patch.object(eventm, 'info')
def test_node_store_update(self, mock_info):
node = nodem.Node('node1', self.profile.id, None)
node_id = node.store(self.context)
mock_info.assert_called_once_with(self.context, node, 'create')
mock_info.reset_mock()
node.name = 'new_name'
new_node_id = node.store(self.context)
self.assertEqual(node_id, new_node_id)
mock_info.assert_called_once_with(self.context, node, 'update')
def test_node_load(self):
ex = self.assertRaises(exception.NodeNotFound,
nodem.Node.load,
self.context, 'non-existent', None)
self.assertEqual('The node (non-existent) could not be found.',
six.text_type(ex))
node = self._create_node('NODE_ID')
node_info = nodem.Node.load(self.context, 'NODE_ID')
self.assertEqual(node.id, node_info.id)
self.assertEqual(node.name, node_info.name)
self.assertEqual(node.physical_id, node_info.physical_id)
self.assertEqual(node.cluster_id, node_info.cluster_id)
self.assertEqual(node.profile_id, node_info.profile_id)
self.assertEqual(node.user, node_info.user)
self.assertEqual(node.project, node_info.project)
self.assertEqual(node.domain, node_info.domain)
self.assertEqual(node.index, node_info.index)
self.assertEqual(node.role, node_info.role)
self.assertEqual(node.init_time, node_info.init_time)
self.assertEqual(node.created_time, node_info.created_time)
self.assertEqual(node.updated_time, node_info.updated_time)
self.assertEqual(node.deleted_time, node_info.deleted_time)
self.assertEqual(node.status, node_info.status)
self.assertEqual(node.status_reason, node_info.status_reason)
self.assertEqual(node.meta_data, node_info.metadata)
self.assertEqual(node.data, node_info.data)
def test_node_load_all(self):
node_info = nodem.Node.load_all(self.context)
self.assertEqual([], [c for c in node_info])
node1 = self._create_node('NODE1')
node2 = self._create_node('NODE2')
# NOTE: we don't test all other parameters because the db api tests
# already covered that
nodes = nodem.Node.load_all(self.context)
self.assertEqual(2, len(nodes))
self.assertEqual(node1.id, nodes[0].id)
self.assertEqual(node2.id, nodes[1].id)
def test_node_to_dict(self):
def _fmt_time(value):
return value and value.isoformat()
node = self._create_node('NODE1')
self.assertIsNotNone(node.id)
expected = {
'id': node.id,
'name': node.name,
'cluster_id': node.cluster_id,
'physical_id': node.physical_id,
'profile_id': node.profile_id,
'user': node.user,
'project': node.project,
'domain': node.domain,
'index': node.index,
'role': node.role,
'init_time': _fmt_time(node.init_time),
'created_time': _fmt_time(node.created_time),
'updated_time': _fmt_time(node.updated_time),
'deleted_time': _fmt_time(node.deleted_time),
'status': node.status,
'status_reason': node.status_reason,
'data': node.data,
'metadata': node.meta_data,
}
result = nodem.Node.load(self.context, 'NODE1')
dt = result.to_dict()
del dt['profile_name']
self.assertEqual(expected, dt)
def test_node_set_status(self):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
node.store(self.context)
self.assertEqual(nodem.Node.INIT, node.status)
self.assertIsNotNone(node.init_time)
self.assertIsNone(node.created_time)
self.assertIsNone(node.updated_time)
# create
node.set_status(self.context, node.CREATING,
reason='Creation in progress')
self.assertEqual('CREATING', node.status)
self.assertEqual('Creation in progress', node.status_reason)
self.assertIsNone(node.created_time)
self.assertIsNone(node.updated_time)
self.assertIsNone(node.deleted_time)
node.set_status(self.context, node.ACTIVE,
reason='Creation succeeded')
self.assertEqual('ACTIVE', node.status)
self.assertEqual('Creation succeeded', node.status_reason)
self.assertIsNotNone(node.created_time)
self.assertIsNone(node.updated_time)
self.assertIsNone(node.deleted_time)
# update
node.set_status(self.context, node.UPDATING,
reason='Update in progress')
self.assertEqual('UPDATING', node.status)
self.assertEqual('Update in progress', node.status_reason)
self.assertIsNotNone(node.created_time)
self.assertIsNone(node.deleted_time)
self.assertIsNone(node.updated_time)
node.set_status(self.context, node.ACTIVE,
reason='Update succeeded')
self.assertEqual('ACTIVE', node.status)
self.assertEqual('Update succeeded', node.status_reason)
self.assertIsNotNone(node.created_time)
self.assertIsNone(node.deleted_time)
self.assertIsNotNone(node.updated_time)
node.set_status(self.context, node.ACTIVE)
self.assertEqual('ACTIVE', node.status)
self.assertIsNotNone(node.created_time)
self.assertIsNone(node.deleted_time)
self.assertIsNotNone(node.updated_time)
# delete
node.set_status(self.context, node.DELETING,
reason='Deletion in progress')
self.assertEqual('DELETING', node.status)
self.assertEqual('Deletion in progress', node.status_reason)
self.assertIsNotNone(node.created_time)
self.assertIsNone(node.deleted_time)
node.set_status(self.context, node.DELETED,
reason='Deletion succeeded')
self.assertEqual('DELETED', node.status)
self.assertEqual('Deletion succeeded', node.status_reason)
self.assertIsNotNone(node.created_time)
self.assertIsNotNone(node.deleted_time)
@mock.patch.object(profiles_base.Profile, 'get_details')
def test_node_get_details(self, mock_details):
node = nodem.Node('node1', self.profile.id, None)
for physical_id in (None, ''):
node.physical_id = physical_id
self.assertEqual({}, node.get_details(self.context))
self.assertEqual(0, mock_details.call_count)
node.physical_id = 'FAKE_ID'
mock_details.return_value = {'foo': 'bar'}
res = node.get_details(self.context)
mock_details.assert_called_once_with(self.context, node)
self.assertEqual({'foo': 'bar'}, res)
@mock.patch.object(eventm, 'warning')
def test_node_handle_exception(self, mock_warning):
ex = exception.ResourceStatusError(resource_id='FAKE_ID',
status='FAKE_STATUS',
reason='FAKE_REASON')
node = nodem.Node('node1', self.profile.id, None)
node.store(self.context)
node._handle_exception(self.context, 'ACTION', 'STATUS', ex)
db_node = db_api.node_get(self.context, node.id)
self.assertEqual(node.ERROR, db_node.status)
self.assertEqual('Profile failed in ACTIOing resource '
'(FAKE_ID) due to: %s' % six.text_type(ex),
db_node.status_reason)
self.assertEqual('FAKE_ID', db_node.physical_id)
mock_warning.assert_called_with(self.context, node, 'ACTION',
'STATUS', six.text_type(ex))
# Exception happens before physical node creation started.
ex = exception.ResourceCreationFailure(rtype='stack',
code=400,
message='Bad request')
node = nodem.Node('node1', self.profile.id, None)
node.store(self.context)
node._handle_exception(self.context, 'CREATE', 'STATUS', ex)
db_node = db_api.node_get(self.context, node.id)
self.assertEqual(node.ERROR, db_node.status)
self.assertEqual('Profile failed in creating node due to: '
'%s' % six.text_type(ex), db_node.status_reason)
self.assertEqual(None, db_node.physical_id)
mock_warning.assert_called_with(self.context, node, 'CREATE',
'STATUS', six.text_type(ex))
@mock.patch.object(eventm, 'info')
@mock.patch.object(nodem.Node, 'store')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'create_object')
def test_node_create(self, mock_create, mock_status, mock_store,
mock_event):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
physical_id = 'fake_id'
mock_create.return_value = physical_id
res = node.do_create(self.context)
self.assertTrue(res)
mock_status.assert_any_call(self.context, node.CREATING,
reason='Creation in progress')
mock_status.assert_any_call(self.context, node.ACTIVE,
'Creation succeeded')
mock_store.assert_called_once_with(self.context)
mock_event.assert_called_once_with(self.context, node, 'create')
self.assertEqual(physical_id, node.physical_id)
def test_node_create_not_init(self):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
node.status = 'NOT_INIT'
res = node.do_create(self.context)
self.assertFalse(res)
@mock.patch.object(eventm, 'info')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'create_object')
def test_node_create_not_created(self, mock_create, mock_status,
mock_event):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
mock_create.return_value = None
res = node.do_create(self.context)
self.assertFalse(res)
mock_status.assert_called_once_with(self.context, node.CREATING,
reason='Creation in progress')
mock_event.assert_called_once_with(self.context, node, 'create')
@mock.patch.object(eventm, 'info')
@mock.patch.object(nodem.Node, '_handle_exception')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'create_object')
def test_node_create_internal_error(self, mock_create, mock_status,
mock_handle_exception,
mock_event):
ex = exception.InternalError(code=500, message='internal error')
mock_create.side_effect = ex
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
res = node.do_create(self.context)
self.assertFalse(res)
mock_handle_exception.assert_called_once_with(self.context,
'create', 'ERROR', ex)
mock_event.assert_called_once_with(self.context, node, 'create')
@mock.patch.object(eventm, 'info')
@mock.patch.object(db_api, 'node_delete')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'delete_object')
def test_node_delete(self, mock_delete, mock_status, mock_db_delete,
mock_event):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
node.physical_id = 'fake_id'
res = node.do_delete(self.context)
self.assertTrue(res)
mock_delete.assert_called_once_with(mock.ANY, node)
mock_db_delete.assert_called_once_with(mock.ANY, node.id, False)
mock_status.assert_called_once_with(self.context, node.DELETING,
reason='Deletion in progress')
mock_event.assert_called_once_with(self.context, node, 'delete')
@mock.patch.object(db_api, 'node_delete')
@mock.patch.object(profiles_base.Profile, 'delete_object')
def test_node_delete_not_created(self, mock_delete, mock_db_delete):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
self.assertEqual('', node.physical_id)
res = node.do_delete(self.context)
self.assertTrue(res)
self.assertFalse(mock_delete.called)
self.assertTrue(mock_db_delete.called)
@mock.patch.object(eventm, 'info')
@mock.patch.object(nodem.Node, '_handle_exception')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'delete_object')
def test_node_delete_resource_status_error(self, mock_delete, mock_status,
mock_handle_exception,
mock_event):
ex = exception.ResourceStatusError(resource_id='id', status='ERROR',
reason='some reason')
mock_delete.side_effect = ex
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
node.physical_id = 'fake_id'
res = node.do_delete(self.context)
self.assertFalse(res)
mock_delete.assert_called_once_with(self.context, node)
mock_handle_exception.assert_called_once_with(self.context, 'delete',
'ERROR', ex)
mock_status.assert_any_call(self.context, 'ERROR',
reason='Deletion failed')
mock_event.assert_called_once_with(self.context, node, 'delete')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'update_object')
def test_node_update(self, mock_update, mock_status):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
new_profile = self._create_profile('NEW_PROFILE_ID')
node.physical_id = 'fake_id'
res = node.do_update(self.context, {'new_profile_id': new_profile.id})
self.assertTrue(res)
mock_update.assert_called_once_with(self.context, node,
new_profile.id)
self.assertEqual('NEW_PROFILE_ID', node.profile_id)
self.assertEqual('NEW_PROFILE_ID', node.rt['profile'].id)
mock_status.assert_any_call(self.context, 'UPDATING',
reason='Update in progress')
mock_status.assert_any_call(self.context, 'ACTIVE',
reason='Update succeeded')
def test_node_update_not_created(self):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
self.assertEqual('', node.physical_id)
res = node.do_update(self.context, 'new_profile_id')
self.assertFalse(res)
@mock.patch.object(nodem.Node, '_handle_exception')
@mock.patch.object(nodem.Node, 'set_status')
@mock.patch.object(profiles_base.Profile, 'update_object')
def test_node_update_resource_status_error(self, mock_update, mock_status,
mock_handle_exception):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
ex = exception.ResourceStatusError(resource_id='id', status='ERROR',
reason='some reason')
mock_update.side_effect = ex
new_profile = self._create_profile('NEW_PROFILE_ID')
node.physical_id = 'fake_id'
res = node.do_update(self.context, {'new_profile_id': new_profile.id})
self.assertFalse(res)
mock_handle_exception.assert_called_once_with(self.context, 'update',
'ERROR', ex)
self.assertNotEqual('NEW_PROFILE_ID', node.profile_id)
@mock.patch.object(db_api, 'node_migrate')
def test_node_join_same_cluster(self, mock_migrate):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
node.index = 1
res = node.do_join(self.context, self.cluster.id)
self.assertTrue(res)
self.assertEqual(1, node.index)
self.assertIsNone(node.updated_time)
self.assertFalse(mock_migrate.called)
@mock.patch.object(timeutils, 'utcnow')
@mock.patch.object(profiles_base.Profile, 'join_cluster')
@mock.patch.object(db_api, 'node_migrate')
def test_node_join(self, mock_migrate, mock_join_cluster, mock_time):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
res = node.do_join(self.context, 'NEW_CLUSTER_ID')
self.assertTrue(res)
mock_migrate.assert_called_once_with(self.context, node.id,
'NEW_CLUSTER_ID', mock_time())
mock_join_cluster.assert_called_once_with(self.context, node,
'NEW_CLUSTER_ID')
self.assertEqual('NEW_CLUSTER_ID', node.cluster_id)
self.assertEqual(mock_migrate.return_value.index, node.index)
self.assertIsNotNone(node.updated_time)
@mock.patch.object(db_api, 'node_migrate')
def test_node_leave_no_cluster(self, mock_migrate):
node = nodem.Node('node1', self.profile.id, None, self.context)
self.assertTrue(node.do_leave(self.context))
self.assertFalse(mock_migrate.called)
self.assertIsNone(node.cluster_id)
self.assertIsNone(node.updated_time)
@mock.patch.object(timeutils, 'utcnow')
@mock.patch.object(profiles_base.Profile, 'leave_cluster')
@mock.patch.object(db_api, 'node_migrate')
def test_node_leave(self, mock_migrate, mock_leave_cluster, mock_time):
node = nodem.Node('node1', self.profile.id, self.cluster.id,
self.context)
res = node.do_leave(self.context)
self.assertTrue(res)
self.assertIsNone(node.cluster_id)
self.assertIsNotNone(node.updated_time)
self.assertEqual(-1, node.index)
mock_migrate.assert_called_once_with(self.context, node.id,
None, mock_time())
mock_leave_cluster.assert_called_once_with(self.context, node)
|
|
'''
Copyright (C) 2011 by Eike Welk.
Test the control.matlab toolbox.
'''
import unittest
import numpy as np
import scipy.signal
from numpy.testing import assert_array_almost_equal
from numpy import array, asarray, matrix, asmatrix, zeros, ones, linspace,\
all, hstack, vstack, c_, r_
from matplotlib.pylab import show, figure, plot, legend, subplot2grid
from control.matlab import ss, step, impulse, initial, lsim, dcgain, \
ss2tf
from control.statesp import _mimo2siso
from control.timeresp import _check_convert_array
from control.exception import slycot_check
import warnings
class TestControlMatlab(unittest.TestCase):
def setUp(self):
pass
def plot_matrix(self):
#Test: can matplotlib correctly plot matrices?
#Yes, but slightly inconvenient
figure()
t = matrix([[ 1.],
[ 2.],
[ 3.],
[ 4.]])
y = matrix([[ 1., 4.],
[ 4., 5.],
[ 9., 6.],
[16., 7.]])
plot(t, y)
#plot(asarray(t)[0], asarray(y)[0])
def make_SISO_mats(self):
"""Return matrices for a SISO system"""
A = matrix([[-81.82, -45.45],
[ 10., -1. ]])
B = matrix([[9.09],
[0. ]])
C = matrix([[0, 0.159]])
D = zeros((1, 1))
return A, B, C, D
def make_MIMO_mats(self):
"""Return matrices for a MIMO system"""
A = array([[-81.82, -45.45, 0, 0 ],
[ 10, -1, 0, 0 ],
[ 0, 0, -81.82, -45.45],
[ 0, 0, 10, -1, ]])
B = array([[9.09, 0 ],
[0 , 0 ],
[0 , 9.09],
[0 , 0 ]])
C = array([[0, 0.159, 0, 0 ],
[0, 0, 0, 0.159]])
D = zeros((2, 2))
return A, B, C, D
def test_dcgain(self):
"""Test function dcgain with different systems"""
if slycot_check():
#Test MIMO systems
A, B, C, D = self.make_MIMO_mats()
gain1 = dcgain(ss(A, B, C, D))
gain2 = dcgain(A, B, C, D)
sys_tf = ss2tf(A, B, C, D)
gain3 = dcgain(sys_tf)
gain4 = dcgain(sys_tf.num, sys_tf.den)
#print("gain1:", gain1)
assert_array_almost_equal(gain1,
array([[0.0269, 0. ],
[0. , 0.0269]]),
decimal=4)
assert_array_almost_equal(gain1, gain2)
assert_array_almost_equal(gain3, gain4)
assert_array_almost_equal(gain1, gain4)
#Test SISO systems
A, B, C, D = self.make_SISO_mats()
gain1 = dcgain(ss(A, B, C, D))
assert_array_almost_equal(gain1,
array([[0.0269]]),
decimal=4)
def test_dcgain_2(self):
"""Test function dcgain with different systems"""
#Create different forms of a SISO system
A, B, C, D = self.make_SISO_mats()
num, den = scipy.signal.ss2tf(A, B, C, D)
# numerator is only a constant here; pick it out to avoid numpy warning
Z, P, k = scipy.signal.tf2zpk(num[0][-1], den)
sys_ss = ss(A, B, C, D)
#Compute the gain with ``dcgain``
gain_abcd = dcgain(A, B, C, D)
gain_zpk = dcgain(Z, P, k)
gain_numden = dcgain(np.squeeze(num), den)
gain_sys_ss = dcgain(sys_ss)
# print('gain_abcd:', gain_abcd, 'gain_zpk:', gain_zpk)
# print('gain_numden:', gain_numden, 'gain_sys_ss:', gain_sys_ss)
#Compute the gain with a long simulation
t = linspace(0, 1000, 1000)
y, _t = step(sys_ss, t)
gain_sim = y[-1]
# print('gain_sim:', gain_sim)
#All gain values must be approximately equal to the known gain
assert_array_almost_equal([gain_abcd, gain_zpk,
gain_numden, gain_sys_ss, gain_sim],
[0.026948, 0.026948, 0.026948, 0.026948,
0.026948],
decimal=6)
def test_step(self):
"""Test function ``step``."""
figure(); plot_shape = (1, 3)
#Test SISO system
A, B, C, D = self.make_SISO_mats()
sys = ss(A, B, C, D)
#print(sys)
#print("gain:", dcgain(sys))
subplot2grid(plot_shape, (0, 0))
t, y = step(sys)
plot(t, y)
subplot2grid(plot_shape, (0, 1))
T = linspace(0, 2, 100)
X0 = array([1, 1])
t, y = step(sys, T, X0)
plot(t, y)
# Test output of state vector
t, y, x = step(sys, return_x=True)
#Test MIMO system
A, B, C, D = self.make_MIMO_mats()
sys = ss(A, B, C, D)
subplot2grid(plot_shape, (0, 2))
t, y = step(sys)
plot(t, y)
def test_impulse(self):
A, B, C, D = self.make_SISO_mats()
sys = ss(A, B, C, D)
figure()
#everything automatically
t, y = impulse(sys)
plot(t, y, label='Simple Case')
#supply time and X0
T = linspace(0, 2, 100)
X0 = [0.2, 0.2]
t, y = impulse(sys, T, X0)
plot(t, y, label='t=0..2, X0=[0.2, 0.2]')
#Test system with direct feed-though, the function should print a warning.
D = [[0.5]]
sys_ft = ss(A, B, C, D)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
t, y = impulse(sys_ft)
plot(t, y, label='Direct feedthrough D=[[0.5]]')
#Test MIMO system
A, B, C, D = self.make_MIMO_mats()
sys = ss(A, B, C, D)
t, y = impulse(sys)
plot(t, y, label='MIMO System')
legend(loc='best')
#show()
def test_initial(self):
A, B, C, D = self.make_SISO_mats()
sys = ss(A, B, C, D)
figure(); plot_shape = (1, 3)
#X0=0 : must produce line at 0
subplot2grid(plot_shape, (0, 0))
t, y = initial(sys)
plot(t, y)
#X0=[1,1] : produces a spike
subplot2grid(plot_shape, (0, 1))
t, y = initial(sys, X0=matrix("1; 1"))
plot(t, y)
#Test MIMO system
A, B, C, D = self.make_MIMO_mats()
sys = ss(A, B, C, D)
#X0=[1,1] : produces same spike as above spike
subplot2grid(plot_shape, (0, 2))
t, y = initial(sys, X0=[1, 1, 0, 0])
plot(t, y)
#show()
#! Old test; no longer functional?? (RMM, 3 Nov 2012)
@unittest.skip("skipping test_check_convert_shape, need to update test")
def test_check_convert_shape(self):
#TODO: check if shape is correct everywhere.
#Correct input ---------------------------------------------
#Recognize correct shape
#Input is array, shape (3,), single legal shape
arr = _check_convert_array(array([1., 2, 3]), [(3,)], 'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Input is array, shape (3,), two legal shapes
arr = _check_convert_array(array([1., 2, 3]), [(3,), (1,3)], 'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Input is array, 2D, shape (1,3)
arr = _check_convert_array(array([[1., 2, 3]]), [(3,), (1,3)], 'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Test special value any
#Input is array, 2D, shape (1,3)
arr = _check_convert_array(array([[1., 2, 3]]), [(4,), (1,"any")], 'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Input is array, 2D, shape (3,1)
arr = _check_convert_array(array([[1.], [2], [3]]), [(4,), ("any", 1)],
'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Convert array-like objects to arrays
#Input is matrix, shape (1,3), must convert to array
arr = _check_convert_array(matrix("1. 2 3"), [(3,), (1,3)], 'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Input is list, shape (1,3), must convert to array
arr = _check_convert_array([[1., 2, 3]], [(3,), (1,3)], 'Test: ')
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
#Special treatment of scalars and zero dimensional arrays:
#They are converted to an array of a legal shape, filled with the scalar
#value
arr = _check_convert_array(5, [(3,), (1,3)], 'Test: ')
assert isinstance(arr, np.ndarray)
assert arr.shape == (3,)
assert_array_almost_equal(arr, [5, 5, 5])
#Squeeze shape
#Input is array, 2D, shape (1,3)
arr = _check_convert_array(array([[1., 2, 3]]), [(3,), (1,3)],
'Test: ', squeeze=True)
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, matrix)
assert arr.shape == (3,) #Shape must be squeezed. (1,3) -> (3,)
#Erroneous input -----------------------------------------------------
#test wrong element data types
#Input is array of functions, 2D, shape (1,3)
self.assertRaises(TypeError, _check_convert_array(array([[min, max, all]]),
[(3,), (1,3)], 'Test: ', squeeze=True))
#Test wrong shapes
#Input has shape (4,) but (3,) or (1,3) are legal shapes
self.assertRaises(ValueError, _check_convert_array(array([1., 2, 3, 4]),
[(3,), (1,3)], 'Test: '))
@unittest.skip("skipping test_lsim, need to update test")
def test_lsim(self):
A, B, C, D = self.make_SISO_mats()
sys = ss(A, B, C, D)
figure(); plot_shape = (2, 2)
#Test with arrays
subplot2grid(plot_shape, (0, 0))
t = linspace(0, 1, 100)
u = r_[1:1:50j, 0:0:50j]
y, _t, _x = lsim(sys, u, t)
plot(t, y, label='y')
plot(t, u/10, label='u/10')
legend(loc='best')
#Test with U=None - uses 2nd algorithm which is much faster.
subplot2grid(plot_shape, (0, 1))
t = linspace(0, 1, 100)
x0 = [-1, -1]
y, _t, _x = lsim(sys, U=None, T=t, X0=x0)
plot(t, y, label='y')
legend(loc='best')
#Test with U=0, X0=0
#Correct reaction to zero dimensional special values
subplot2grid(plot_shape, (0, 1))
t = linspace(0, 1, 100)
y, _t, _x = lsim(sys, U=0, T=t, X0=0)
plot(t, y, label='y')
legend(loc='best')
#Test with matrices
subplot2grid(plot_shape, (1, 0))
t = matrix(linspace(0, 1, 100))
u = matrix(r_[1:1:50j, 0:0:50j])
x0 = matrix("0.; 0")
y, t_out, _x = lsim(sys, u, t, x0)
plot(t_out, y, label='y')
plot(t_out, asarray(u/10)[0], label='u/10')
legend(loc='best')
#Test with MIMO system
subplot2grid(plot_shape, (1, 1))
A, B, C, D = self.make_MIMO_mats()
sys = ss(A, B, C, D)
t = matrix(linspace(0, 1, 100))
u = array([r_[1:1:50j, 0:0:50j],
r_[0:1:50j, 0:0:50j]])
x0 = [0, 0, 0, 0]
y, t_out, _x = lsim(sys, u, t, x0)
plot(t_out, y[0], label='y[0]')
plot(t_out, y[1], label='y[1]')
plot(t_out, u[0]/10, label='u[0]/10')
plot(t_out, u[1]/10, label='u[1]/10')
legend(loc='best')
#Test with wrong values for t
#T is None; - special handling: Value error
self.assertRaises(ValueError, lsim(sys, U=0, T=None, x0=0))
#T="hello" : Wrong type
#TODO: better wording of error messages of ``lsim`` and
# ``_check_convert_array``, when wrong type is given.
# Current error message is too cryptic.
self.assertRaises(TypeError, lsim(sys, U=0, T="hello", x0=0))
#T=0; - T can not be zero dimensional, it determines the size of the
# input vector ``U``
self.assertRaises(ValueError, lsim(sys, U=0, T=0, x0=0))
#T is not monotonically increasing
self.assertRaises(ValueError, lsim(sys, U=0, T=[0., 1., 2., 2., 3.], x0=0))
#show()
def assert_systems_behave_equal(self, sys1, sys2):
'''
Test if the behavior of two LTI systems is equal. Raises ``AssertionError``
if the systems are not equal.
Works only for SISO systems.
Currently computes dcgain, and computes step response.
'''
#gain of both systems must be the same
assert_array_almost_equal(dcgain(sys1), dcgain(sys2))
#Results of ``step`` simulation must be the same too
y1, t1 = step(sys1)
y2, t2 = step(sys2, t1)
assert_array_almost_equal(y1, y2)
def test_convert_MIMO_to_SISO(self):
'''Convert mimo to siso systems'''
#Test with our usual systems --------------------------------------------
#SISO PT2 system
As, Bs, Cs, Ds = self.make_SISO_mats()
sys_siso = ss(As, Bs, Cs, Ds)
#MIMO system that contains two independent copies of the SISO system above
Am, Bm, Cm, Dm = self.make_MIMO_mats()
sys_mimo = ss(Am, Bm, Cm, Dm)
# t, y = step(sys_siso)
# plot(t, y, label='sys_siso d=0')
sys_siso_00 = _mimo2siso(sys_mimo, input=0, output=0,
warn_conversion=False)
sys_siso_11 = _mimo2siso(sys_mimo, input=1, output=1,
warn_conversion=False)
#print("sys_siso_00 ---------------------------------------------")
#print(sys_siso_00)
#print("sys_siso_11 ---------------------------------------------")
#print(sys_siso_11)
#gain of converted system and equivalent SISO system must be the same
self.assert_systems_behave_equal(sys_siso, sys_siso_00)
self.assert_systems_behave_equal(sys_siso, sys_siso_11)
#Test with additional systems --------------------------------------------
#They have crossed inputs and direct feedthrough
#SISO system
As = matrix([[-81.82, -45.45],
[ 10., -1. ]])
Bs = matrix([[9.09],
[0. ]])
Cs = matrix([[0, 0.159]])
Ds = matrix([[0.02]])
sys_siso = ss(As, Bs, Cs, Ds)
# t, y = step(sys_siso)
# plot(t, y, label='sys_siso d=0.02')
# legend(loc='best')
#MIMO system
#The upper left sub-system uses : input 0, output 1
#The lower right sub-system uses: input 1, output 0
Am = array([[-81.82, -45.45, 0, 0 ],
[ 10, -1, 0, 0 ],
[ 0, 0, -81.82, -45.45],
[ 0, 0, 10, -1, ]])
Bm = array([[9.09, 0 ],
[0 , 0 ],
[0 , 9.09],
[0 , 0 ]])
Cm = array([[0, 0, 0, 0.159],
[0, 0.159, 0, 0 ]])
Dm = matrix([[0, 0.02],
[0.02, 0 ]])
sys_mimo = ss(Am, Bm, Cm, Dm)
sys_siso_01 = _mimo2siso(sys_mimo, input=0, output=1,
warn_conversion=False)
sys_siso_10 = _mimo2siso(sys_mimo, input=1, output=0,
warn_conversion=False)
# print("sys_siso_01 ---------------------------------------------")
# print(sys_siso_01)
# print("sys_siso_10 ---------------------------------------------")
# print(sys_siso_10)
#gain of converted system and equivalent SISO system must be the same
self.assert_systems_behave_equal(sys_siso, sys_siso_01)
self.assert_systems_behave_equal(sys_siso, sys_siso_10)
def debug_nasty_import_problem():
'''
``*.egg`` files have precedence over ``PYTHONPATH``. Therefore packages
that were installed with ``easy_install``, can not be easily developed with
Eclipse.
See also:
http://bugs.python.org/setuptools/issue53
Use this function to debug the issue.
'''
#print the directories where python searches for modules and packages.
import sys
print('sys.path: -----------------------------------')
for name in sys.path:
print(name)
if __name__ == '__main__':
unittest.main()
# vi:ts=4:sw=4:expandtab
|
|
"""Bokeh ELPDPlot."""
import warnings
import bokeh.plotting as bkp
import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.models.annotations import Title
from bokeh.models.glyphs import Scatter
from ....rcparams import _validate_bokeh_marker, rcParams
from ...plot_utils import _scale_fig_size, color_from_dim, vectorized_to_hex
from .. import show_layout
from . import backend_kwarg_defaults, create_axes_grid
def plot_elpd(
ax,
models,
pointwise_data,
numvars,
figsize,
textsize,
plot_kwargs,
xlabels,
coord_labels,
xdata,
threshold,
legend, # pylint: disable=unused-argument
color,
backend_kwargs,
show,
):
"""Bokeh elpd plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(
("dpi", "plot.bokeh.figure.dpi"),
),
**backend_kwargs,
}
plot_kwargs = {} if plot_kwargs is None else plot_kwargs
plot_kwargs.setdefault("marker", rcParams["plot.bokeh.marker"])
if isinstance(color, str):
if color in pointwise_data[0].dims:
colors, _ = color_from_dim(pointwise_data[0], color)
plot_kwargs.setdefault("color", vectorized_to_hex(colors))
plot_kwargs.setdefault("color", vectorized_to_hex(color))
# flatten data (data must be flattened after selecting, labeling and coloring)
pointwise_data = [pointwise.values.flatten() for pointwise in pointwise_data]
if numvars == 2:
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 1, numvars - 1
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
ax = create_axes_grid(
1,
figsize=figsize,
squeeze=True,
backend_kwargs=backend_kwargs,
)
ydata = pointwise_data[0] - pointwise_data[1]
_plot_atomic_elpd(
ax, xdata, ydata, *models, threshold, coord_labels, xlabels, True, True, plot_kwargs
)
show_layout(ax, show)
else:
max_plots = (
numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
)
vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
if vars_to_plot < numvars:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of resulting ELPD pairwise plots with these variables, generating only a "
"{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
UserWarning,
)
numvars = vars_to_plot
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 2, numvars - 2
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
dpi = backend_kwargs.pop("dpi")
ax = []
for row in range(numvars - 1):
ax_row = []
for col in range(numvars - 1):
if row == 0 and col == 0:
ax_first = bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
**backend_kwargs,
)
ax_row.append(ax_first)
elif row < col:
ax_row.append(None)
else:
ax_row.append(
bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
x_range=ax_first.x_range,
y_range=ax_first.y_range,
**backend_kwargs,
)
)
ax.append(ax_row)
ax = np.array(ax)
for i in range(0, numvars - 1):
var1 = pointwise_data[i]
for j in range(0, numvars - 1):
if j < i:
continue
var2 = pointwise_data[j + 1]
ydata = var1 - var2
_plot_atomic_elpd(
ax[j, i],
xdata,
ydata,
models[i],
models[j + 1],
threshold,
coord_labels,
xlabels,
j == numvars - 2,
i == 0,
plot_kwargs,
)
show_layout(ax, show)
return ax
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = Scatter(
x="xdata",
y="ydata",
size="sizes",
line_color=plot_kwargs.get("color", "black"),
marker=marker,
)
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.nonzero(bool_ary)[0]
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=[outlier],
y=[ydata[outlier]],
text=label,
text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = f"{model1} - {model2}"
ax_.title = title
|
|
import math
from Person import Person, calcDist
from primesense import openni2, nite2
import time
import traceback
class KinectInterface(object):
MAX_GESTURE_DISTANCE_FROM_JOINT = 500
def __init__(self, gesture_callback, pose_callback, user_added_callback, user_removed_callback, user_roles_changed):
self.gesture_callback = gesture_callback
self.pose_callback = pose_callback
self.user_added_callback = user_added_callback
self.user_removed_callback = user_removed_callback
self.user_roles_changed = user_roles_changed
def start(self):
openni2.initialize()
nite2.initialize()
self.user_tracker = nite2.UserTracker(None)
self.hand_tracker = nite2.HandTracker(None)
self.hand_tracker.start_gesture_detection(nite2.c_api.NiteGestureType.NITE_GESTURE_CLICK)
#self.hand_tracker.start_gesture_detection(nite2.c_api.NiteGestureType.NITE_GESTURE_WAVE)
self.hand_listener = HandListener(self.hand_tracker, self.gesture_received)
self.user_listener = UserListener(self.user_tracker, self.pose_detected, self.user_added_callback, self.user_removed_callback, self.user_roles_changed)
def stop(self):
print "Closing Kinect interfaces"
#self.hand_listener.close()
#self.user_listener.close()
#nite2.c_api.niteShutdown()
#openni2.c_api.oniShutdown()
nite2.unload()
openni2.unload()
print "Kinect interfaces closed"
def get_joint_positions(self):
positionList = []
for user_id, user in self.user_listener.tracked_users.items():
positions = []
try:
for i in range(15):
position = user.skeleton.get_joint(i).position
x, y = self.user_listener.user_tracker.convert_joint_coordinates_to_depth(position.x, position.y, position.z)
positions.append((x,y))
positionList.append(positions)
except:
traceback.print_exc("Failed saving user position")
return positionList
def get_visible_user_joint_positions(self):
users = {}
for user_id, user in self.user_listener.tracked_users.items():
if user_id not in self.user_listener.visible_users:
continue
if user.role is None:
continue
try:
users[user.role] = self.get_user_joint_positions(user)
except:
traceback.print_exc("Failed saving user position")
return users
def get_user_joint_positions(self, user):
positions = []
for i in range(15):
joint = user.skeleton.get_joint(i)
if joint.positionConfidence < 0.5:
#print "Not confident! Role:", user.role, "Joint:", i, "Conf:", joint.positionConfidence
continue
position = joint.position
x, y = self.user_listener.user_tracker.convert_joint_coordinates_to_depth(position.x, position.y, position.z)
positions.append((x,y))
return positions
def gesture_received(self, gesture):
print "Gesture! Type:", gesture.type, " Position:", gesture.currentPosition
hand_data = self.get_hand(gesture.currentPosition)
if hand_data:
user_id, hand = hand_data
print "Gesture generated by", user_id, hand
self.gesture_callback(user_id, hand, gesture)
def get_hand(self, position):
all_hands = []
for user_id, user in self.user_listener.tracked_users.iteritems():
for hand in (
nite2.c_api.NiteJointType.NITE_JOINT_RIGHT_HAND,
nite2.c_api.NiteJointType.NITE_JOINT_LEFT_HAND
):
hand_position = user.last_joint_positions[hand]
hand_distance_from_gesture = calcDist(position, hand_position)
all_hands.append((user_id, hand, hand_distance_from_gesture))
if not all_hands:
print "No hands tracked!"
return None
closest_hand = min(all_hands, key=lambda hand: hand[2])
if closest_hand[2] > self.MAX_GESTURE_DISTANCE_FROM_JOINT:
print "Can't find hand that generated gesture! Closest is %s mm away." % closest_hand[2]
return None
return closest_hand[:2]
def pose_detected(self, user_id, pose_type):
print "Pose detected! Type:", pose_type, " User:", user_id
self.pose_callback(user_id, pose_type)
class HandListener(nite2.HandTrackerListener):
def __init__(self, hand_tracker, gesture_callback):
super(HandListener, self).__init__(hand_tracker)
self.last_ts = None
self.gesture_callback = gesture_callback
def on_ready_for_next_frame(self):
if not self.hand_tracker:
# Can happen while NiTE2 is being shut down.
return
frame = self.hand_tracker.read_frame()
#self.log_frame_frequency()
self.handle_frame(frame)
def log_frame_frequency(self):
frame_timestamp = time.time()
if self.last_ts is not None:
print "Time delta (HAND): %6d ms" % (1000 * (frame_timestamp - self.last_ts))
self.last_ts = frame_timestamp
def handle_frame(self, frame):
for gesture in frame.gestures:
gesture = nite2.GestureData(gesture)
if gesture.is_complete():
self.gesture_callback(gesture)
class UserListener(nite2.UserTrackerListener):
POSE_HOLD_MINIMUM_TIME = 1
MAX_USERS = 2
def __init__(self, user_tracker, pose_callback, user_added_callback, user_removed_callback, user_roles_changed):
super(UserListener, self).__init__(user_tracker)
self.last_ts = None
self.visible_users = set()
self.skeleton_state = {}
self.tracked_users = {}
self.users_poses = {}
self.pose_callback = pose_callback
self.user_added = user_added_callback
self.user_removed = user_removed_callback
self.user_roles_changed = user_roles_changed
def on_ready_for_next_frame(self):
if not self.user_tracker:
# Can happen while NiTE2 is being shut down.
return
frame = self.user_tracker.read_frame()
frame_timestamp = time.time()
#self.log_frame_frequency()
self.handle_frame(frame, frame_timestamp)
def log_frame_frequency(self):
frame_timestamp = time.time()
if self.last_ts is not None:
print "Time delta (USER): %6d ms" % (1000 * (frame_timestamp - self.last_ts))
self.last_ts = frame_timestamp
def handle_frame(self, frame, frame_timestamp):
for user in frame.users:
self.start_tracking_if_needed(user)
self.track_user_visibility(user)
self.track_skeleton_state(user)
self.update_skeleton(user, frame_timestamp)
self.track_poses(user, frame_timestamp)
self.update_roles()
def start_tracking_if_needed(self, user):
if not user.is_new():
return
if len(self.tracked_users) >= self.MAX_USERS:
print "Found new user %s, but already tracking %s users - skipping" % (user.id, self.MAX_USERS)
return
print "Found new user, starting to track:", user.id
self.user_tracker.start_skeleton_tracking(user.id)
self.user_tracker.start_pose_detection(user.id, nite2.c_api.NitePoseType.NITE_POSE_PSI)
self.user_tracker.start_pose_detection(user.id, nite2.c_api.NitePoseType.NITE_POSE_CROSSED_HANDS)
def track_user_visibility(self, user):
if user.is_visible() and not user.id in self.visible_users:
print "User #%s: Visible" % user.id
self.visible_users.add(user.id)
if not user.is_visible() and user.id in self.visible_users:
print "User #%s: Out of scene" % user.id
self.visible_users.remove(user.id)
def track_skeleton_state(self, user):
if user.id in self.skeleton_state and self.skeleton_state[user.id] != user.skeleton.state:
if user.skeleton.state == nite2.c_api.NiteSkeletonState.NITE_SKELETON_NONE:
print "User #%s: Stopped tracking" % user.id
elif user.skeleton.state == nite2.c_api.NiteSkeletonState.NITE_SKELETON_CALIBRATING:
print "User #%s: Calibrating..." % user.id
elif user.skeleton.state == nite2.c_api.NiteSkeletonState.NITE_SKELETON_TRACKED:
print "User #%s: Tracking!" % user.id
else:
print "User #%s: Calibration Failed... :-|"
self.skeleton_state[user.id] = user.skeleton.state
def update_skeleton(self, user, frame_timestamp):
if user.skeleton.state == nite2.c_api.NiteSkeletonState.NITE_SKELETON_TRACKED:
person = self.tracked_users.get(user.id)
if not person:
person = Person()
self.tracked_users[user.id] = person
self.user_added(user.id)
person.update_skeleton(user.skeleton, frame_timestamp)
elif user.id in self.tracked_users:
self.user_removed(user.id)
del self.tracked_users[user.id]
def track_poses(self, user, frame_timestamp):
user_poses = self.users_poses.setdefault(user.id, {})
for pose_type in (
nite2.c_api.NitePoseType.NITE_POSE_PSI,
nite2.c_api.NitePoseType.NITE_POSE_CROSSED_HANDS
):
pose = user.get_pose(pose_type)
if pose.is_entered():
user_poses[pose_type] = frame_timestamp
elif pose.is_held() or pose.is_exited():
pose_start_time = user_poses.get(pose_type)
if pose_start_time and frame_timestamp - pose_start_time >= self.POSE_HOLD_MINIMUM_TIME:
# Remove the pose so that it's not triggered again.
del user_poses[pose_type]
self.pose_callback(user.id, pose_type)
# def update_roles_OLD(self):
# users = self.tracked_users.items()
# changed = False
#
# if len(users) == 1:
# user_id, user = users[0]
# # If there's only one user, determine the role based on the X position.
# x_position = self.get_user_horizontal_position(users[0])
# new_role = UserRole.RIGHT_USER if x_position > 0 else UserRole.LEFT_USER
# if user.role != new_role:
# user.role = new_role
# changed = True
# elif len(users) == 2:
# rightmost_user_id, _ = max(users, key=self.get_user_horizontal_position)
# for user_id, user in users:
# if user_id == rightmost_user_id:
# new_role = UserRole.RIGHT_USER
# else:
# new_role = UserRole.LEFT_USER
#
# if user.role != new_role:
# user.role = new_role
# changed = True
# else:
# #assert len(users) == 0
# if len(users) != 0:
# print "More than 2 users"
#
# if changed:
# self.user_roles_changed()
def update_roles(self):
if len(self.tracked_users) == 0:
return
if len(self.tracked_users) > 2:
print "Huh? More than 2 tracked users in update_roles"
return
existing_roles = [user.role for user in self.tracked_users.values()]
if all(existing_roles):
# All tracked users already have a role
return
users = self.tracked_users.items()
all_roles = set((UserRole.LEFT_USER, UserRole.RIGHT_USER))
used_roles = set(filter(None, existing_roles))
available_roles = sorted(all_roles - used_roles)
for user_id, user in users:
if user.role is None:
user.role = available_roles.pop(0)
self.user_roles_changed(user_id)
@staticmethod
def get_user_horizontal_position(user_item):
# TODO: Use center of mass instead of torso
user_id, user = user_item
return user.skeleton.get_joint(nite2.c_api.NiteJointType.NITE_JOINT_TORSO).position.x
class UserRole(object):
LEFT_USER = 1
RIGHT_USER = 2
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.idl
~~~~~~~~~~~~~~~~~~~
Lexers for IDL.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Operator, Keyword, Name, Number
__all__ = ['IDLLexer']
class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
.. versionadded:: 1.6
"""
name = 'IDL'
aliases = ['idl']
filenames = ['*.pro']
mimetypes = ['text/idl']
flags = re.IGNORECASE | re.MULTILINE
_RESERVED = (
'and', 'begin', 'break', 'case', 'common', 'compile_opt',
'continue', 'do', 'else', 'end', 'endcase', 'elseelse',
'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
'endwhile', 'eq', 'for', 'foreach', 'forward_function',
'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
'repeat', 'switch', 'then', 'until', 'while', 'xor')
"""Reserved words from: http://www.exelisvis.com/docs/reswords.html"""
_BUILTIN_LIB = (
'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
'arg_present', 'array_equal', 'array_indices', 'arrow',
'ascii_template', 'asin', 'assoc', 'atan', 'axis',
'a_correlate', 'bandpass_filter', 'bandreject_filter',
'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
'binomial', 'bin_date', 'bit_ffs', 'bit_population',
'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
'bytscl', 'caldat', 'calendar', 'call_external',
'call_function', 'call_method', 'call_procedure', 'canny',
'catch', 'cd', 'cdf_\w*', 'ceil', 'chebyshev',
'check_math',
'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
'cmyk_convert', 'colorbar', 'colorize_sample',
'colormap_applicable', 'colormap_gradient',
'colormap_rotation', 'colortable', 'color_convert',
'color_exchange', 'color_quan', 'color_range_map', 'comfit',
'command_line_args', 'complex', 'complexarr', 'complexround',
'compute_mesh_normals', 'cond', 'congrid', 'conj',
'constrained_min', 'contour', 'convert_coord', 'convol',
'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
'create_view', 'crossp', 'crvlength', 'cti_test',
'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
'cw_animate', 'cw_animate_getp', 'cw_animate_load',
'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
'cw_form', 'cw_fslider', 'cw_light_editor',
'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
'cw_palette_editor', 'cw_palette_editor_get',
'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
'define_msgblk', 'define_msgblk_from_file', 'defroi',
'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
'dialog_printersetup', 'dialog_printjob',
'dialog_read_image', 'dialog_write_image', 'digital_filter',
'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
'dlm_load', 'dlm_register', 'doc_library', 'double',
'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
'eof', 'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
'erode', 'errorplot', 'errplot', 'estimator_filter',
'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
'file_basename', 'file_chmod', 'file_copy', 'file_delete',
'file_dirname', 'file_expand_path', 'file_info',
'file_lines', 'file_link', 'file_mkdir', 'file_move',
'file_poll_input', 'file_readlink', 'file_same',
'file_search', 'file_test', 'file_which', 'findgen',
'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
'fltarr', 'flush', 'format_axis_values', 'free_lun',
'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
'getwindows', 'get_drive_list', 'get_dxf_objects',
'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
'greg2jul', 'grib_\w*', 'grid3', 'griddata',
'grid_input', 'grid_tps', 'gs_iter',
'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
'hanning', 'hash', 'hdf_\w*', 'heap_free',
'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
'image_cont', 'image_statistics', 'imaginary', 'imap',
'indgen', 'intarr', 'interpol', 'interpolate',
'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
'json_serialize', 'jul2greg', 'julday', 'keyword_set',
'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
'label_region', 'ladfit', 'laguerre', 'laplacian',
'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
'la_gm_linear_model', 'la_hqr', 'la_invert',
'la_least_squares', 'la_least_square_equality',
'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
'lngamma', 'lnp_test', 'loadct', 'locale_get',
'logical_and', 'logical_or', 'logical_true', 'lon64arr',
'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
'map_continents', 'map_grid', 'map_image', 'map_patch',
'map_proj_forward', 'map_proj_image', 'map_proj_info',
'map_proj_init', 'map_proj_inverse', 'map_set',
'matrix_multiply', 'matrix_power', 'max', 'md_test',
'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
'message', 'min', 'min_curve_surf', 'mk_html_help',
'modifyct', 'moment', 'morph_close', 'morph_distance',
'morph_gradient', 'morph_hitormiss', 'morph_open',
'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
'noise_scatter', 'noise_slur', 'norm', 'n_elements',
'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
'online_help', 'on_error', 'open', 'oplot', 'oploterr',
'parse_url', 'particle_trace', 'path_cache', 'path_sep',
'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
'plot_field', 'pnt_line', 'point_lun', 'polarplot',
'polar_contour', 'polar_surface', 'poly', 'polyfill',
'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
'print', 'printd', 'product', 'profile', 'profiler',
'profiles', 'project_vol', 'psafm', 'pseudo',
'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
'query_csv', 'query_dicom', 'query_gif', 'query_image',
'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
'query_png', 'query_ppm', 'query_srf', 'query_tiff',
'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
'rdpix', 'read', 'reads', 'readu', 'read_ascii',
'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
'read_xwd', 'real_part', 'rebin', 'recall_commands',
'recon3', 'reduce_colors', 'reform', 'region_grow',
'register_cursor', 'regress', 'replicate',
'replicate_inplace', 'resolve_all', 'resolve_routine',
'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
'rot', 'rotate', 'round', 'routine_filepath',
'routine_info', 'rs_test', 'r_correlate', 'r_test',
'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
'scope_traceback', 'scope_varfetch', 'scope_varname',
'search2d', 'search3d', 'sem_create', 'sem_delete',
'sem_lock', 'sem_release', 'setenv', 'set_plot',
'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
'streamline', 'stregex', 'stretch', 'string', 'strjoin',
'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
'tanh', 'tek_color', 'temporary', 'tetra_clip',
'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
'timegen', 'time_test2', 'tm_test', 'total', 'trace',
'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
'value_locate', 'variance', 'vector', 'vector_field', 'vel',
'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
'where', 'widget_base', 'widget_button', 'widget_combobox',
'widget_control', 'widget_displaycontextmen', 'widget_draw',
'widget_droplist', 'widget_event', 'widget_info',
'widget_label', 'widget_list', 'widget_propertysheet',
'widget_slider', 'widget_tab', 'widget_table',
'widget_text', 'widget_tree', 'widget_tree_move',
'widget_window', 'wiener_filter', 'window', 'writeu',
'write_bmp', 'write_csv', 'write_gif', 'write_image',
'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
'write_png', 'write_ppm', 'write_spr', 'write_srf',
'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
'xmtool', 'xobjview', 'xobjview_rotate',
'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
'xvolume', 'xvolume_rotate', 'xvolume_write_image',
'xyouts', 'zoom', 'zoom_24')
"""Functions from: http://www.exelisvis.com/docs/routines-1.html"""
tokens = {
'root': [
(r'^\s*;.*?\n', Comment.Singleline),
(words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword),
(words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin),
(r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
(r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator),
(r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
(r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
(r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number),
(r'.', Text),
]
}
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test dependency collection functions.
"""
from operator import getitem
import pytest
from exopy.app.dependencies.api import (BuildDependency,
RuntimeDependencyAnalyser)
from exopy.tasks.api import ComplexTask, InstrumentTask, TaskInterface
from exopy.tasks.infos import (TaskInfos, InterfaceInfos,
INSTR_RUNTIME_TASK_DRIVERS_ID,
INSTR_RUNTIME_TASK_PROFILES_ID,
INSTR_RUNTIME_INTERFACE_DRIVERS_ID,
INSTR_RUNTIME_INTERFACE_PROFILES_ID)
@pytest.fixture
def task_dep_collector(task_workbench):
"""Collector for task dependencies.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'build_deps'][0]
return [b for b in dep_ext.get_children(BuildDependency)
if b.id == 'exopy.task'][0]
@pytest.fixture
def interface_dep_collector(task_workbench):
"""Collector for interface dependencies.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'build_deps'][0]
return [b for b in dep_ext.get_children(BuildDependency)
if b.id == 'exopy.tasks.interface'][0]
@pytest.fixture
def driver_dep_collector(task_workbench):
"""Collector for driver dependencies for task supporting instrument and
having the proper selected_intrument member.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_TASK_DRIVERS_ID][0]
@pytest.fixture
def profile_dep_collector(task_workbench):
"""Collector for profile dependencies for task supporting instrument and
having the proper selected_intrument member.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_TASK_PROFILES_ID][0]
@pytest.fixture
def i_driver_dep_collector(task_workbench):
"""Collector for driver dependencies for interface supporting instrument
and having the proper selected_intrument member or being attached to a task
that does.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_INTERFACE_DRIVERS_ID][0]
@pytest.fixture
def i_profile_dep_collector(task_workbench):
"""Collector for profile dependencies for interface supporting instrument
and having the proper selected_intrument member or being attached to a task
that does.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_INTERFACE_PROFILES_ID][0]
def test_analysing_task_dependencies(monkeypatch, task_workbench,
task_dep_collector):
"""Test analysing the dependencies of a task.
"""
runtime = {'test'}
plugin = task_workbench.get_plugin('exopy.tasks')
monkeypatch.setattr(plugin.get_task_infos('exopy.ComplexTask'),
'dependencies', runtime)
dep = set()
errors = dict()
run = task_dep_collector.analyse(task_workbench, ComplexTask(), getattr,
dep, errors)
assert run == runtime
assert 'exopy.ComplexTask' in dep
assert not errors
dep = set()
run = task_dep_collector.analyse(task_workbench, {'task_id': '__dummy__'},
getitem, dep, errors)
assert not run
assert not dep
assert '__dummy__' in errors
def test_validating_task_dependencies(task_workbench, task_dep_collector):
"""Test validating task dependencies.
"""
errors = {}
task_dep_collector.validate(task_workbench,
{'exopy.ComplexTask', '__dummy__'}, errors)
assert 'exopy.ComplexTask' not in errors
assert '__dummy__' in errors
def test_collecting_task_dependencies(task_workbench, task_dep_collector):
"""Test collecting the dependencies found in a task.
"""
dependencies = dict.fromkeys(['exopy.ComplexTask', '__dummy__'])
errors = {}
task_dep_collector.collect(task_workbench, dependencies, errors)
assert 'exopy.ComplexTask' in dependencies
assert '__dummy__' in errors
def test_analysing_interface_dependencies(monkeypatch, task_workbench,
interface_dep_collector):
"""Test analysing the dependencies in an interface.
"""
runtime = {'test'}
interface = 'exopy.LoopTask:exopy.LinspaceLoopInterface'
plugin = task_workbench.get_plugin('exopy.tasks')
monkeypatch.setattr(plugin.get_interface_infos(interface), 'dependencies',
runtime)
dep = set()
errors = dict()
run = interface_dep_collector.analyse(task_workbench,
{'interface_id': str(interface)},
getitem, dep, errors)
assert run == runtime
assert interface in dep
assert not errors
dep.clear()
run = interface_dep_collector.analyse(task_workbench,
{'interface_id':
'LoopTask:__dummy__'},
getitem, dep, errors)
assert not run
assert not dep
assert 'LoopTask:__dummy__' in errors
def test_validating_interface_dependencies(task_workbench,
interface_dep_collector):
"""Test validating interface dependencies.
"""
errors = {}
interface_dep_collector.validate(
task_workbench,
{'exopy.LoopTask:exopy.LinspaceLoopInterface',
'LoopTask:__dummy__'}, errors)
assert 'exopy.LoopTask:exopy.LinspaceLoopInterface' not in errors
assert 'LoopTask:__dummy__' in errors
def test_collecting_interface_dependencies(task_workbench,
interface_dep_collector):
"""Test collecting the dependencies found in an interface.
"""
dependencies = dict.fromkeys(['exopy.LoopTask:exopy.LinspaceLoopInterface',
'LoopTask:__dummy__'])
errors = {}
interface_dep_collector.collect(task_workbench, dependencies, errors)
assert 'exopy.LoopTask:exopy.LinspaceLoopInterface' in dependencies
assert 'LoopTask:__dummy__' in errors
def test_analysing_instr_task_dependencies(monkeypatch, task_workbench,
task_dep_collector,
profile_dep_collector,
driver_dep_collector):
"""Test analysing the dependencies of a task.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
plugin._tasks.contributions['exopy.InstrumentTask'] =\
TaskInfos(cls=InstrumentTask, instruments=['test'])
dep = set()
errors = dict()
t = InstrumentTask(selected_instrument=('test', 'dummy', 'c', None))
run = task_dep_collector.analyse(task_workbench, t, getattr,
dep, errors)
assert run == {'exopy.tasks.instruments.drivers',
'exopy.tasks.instruments.profiles'}
assert 'exopy.InstrumentTask' in dep
assert not errors
dep.clear()
profile_dep_collector.analyse(task_workbench, t, dep, errors)
assert 'test' in dep
assert not errors
dep.clear()
driver_dep_collector.analyse(task_workbench, t, dep, errors)
assert 'dummy' in dep
assert not errors
def test_analysing_instr_interface_dependencies(monkeypatch, task_workbench,
interface_dep_collector,
i_profile_dep_collector,
i_driver_dep_collector):
"""Test analysing the dependencies of an interface.
"""
class FalseI(TaskInterface):
__slots__ = ('__dict__')
plugin = task_workbench.get_plugin('exopy.tasks')
p_infos = TaskInfos(cls=InstrumentTask, instruments=['test'])
plugin._tasks.contributions['exopy.InstrumentTask'] = p_infos
p_infos.interfaces['tasks.FalseI'] =\
InterfaceInfos(cls=FalseI, instruments=['test'], parent=p_infos)
dep = set()
errors = dict()
i = FalseI()
t = InstrumentTask(selected_instrument=('test', 'dummy', 'c', None))
i.task = t
run = interface_dep_collector.analyse(task_workbench, i, getattr,
dep, errors)
assert run == {INSTR_RUNTIME_INTERFACE_DRIVERS_ID,
INSTR_RUNTIME_INTERFACE_PROFILES_ID}
assert 'exopy.InstrumentTask:tasks.FalseI' in dep
assert not errors
dep.clear()
i_profile_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'test' in dep
assert not errors
dep.clear()
i_driver_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'dummy' in dep
assert not errors
i.selected_instrument = ('test2', 'dummy2', 'c', None)
dep.clear()
i_profile_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'test2' in dep
assert not errors
dep.clear()
i_driver_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'dummy2' in dep
assert not errors
|
|
# coding=utf-8
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import shutil
import tempfile
from unittest import TestCase
import mock
from hamcrest import assert_that, equal_to, raises
import storops
from storops.exception import UnityThinCloneLimitExceededError
from storops.lib.thinclone_helper import TCHelper
from storops.unity.enums import ThinCloneActionEnum
from storops.unity.resource.lun import UnityLun
from storops.unity.resource.snap import UnitySnap
from storops_test.unity.rest_mock import patch_rest, t_rest
class TestThinCloneHelper(TestCase):
@classmethod
def setUpClass(cls):
storops.enable_log()
def setUp(self):
self.path = tempfile.mkdtemp(suffix='storops')
TCHelper.set_up(self.path)
TCHelper._gc_background.set_interval(0.10)
TCHelper._gc_background.MAX_RETRIES = 1
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
TCHelper.clean_up()
@patch_rest
def test_thin_clone_lun(self):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
cloned = TCHelper.thin_clone(lun._cli, lun, name='test_thin_clone_lun',
description='description',
io_limit_policy=None)
assert_that(cloned.id, equal_to('sv_5555'))
@patch_rest
def test_thin_clone_lun_new_tc_base(self):
TCHelper._tc_cache['sv_2'] = UnityLun.get(_id='sv_5605',
cli=t_rest(version='4.2.0'))
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
cloned = TCHelper.thin_clone(lun._cli, lun, name='test_thin_clone_lun',
description='description',
io_limit_policy=None)
assert_that(cloned.id, equal_to('sv_5556'))
@patch_rest
def test_thin_clone_snap(self):
snap = UnitySnap.get(_id='38654700002', cli=t_rest(version='4.2.0'))
cloned = TCHelper.thin_clone(snap._cli, snap,
name='test_thin_clone_snap',
description='description',
io_limit_policy=None)
assert_that(cloned.id, equal_to('sv_5557'))
@patch_rest
def test_thin_clone_snap_new_tc_base(self):
TCHelper._tc_cache['sv_2'] = UnityLun.get(_id='sv_5605',
cli=t_rest(version='4.2.0'))
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
cloned = TCHelper.thin_clone(lun._cli, lun, name='test_thin_clone_lun',
description='description',
io_limit_policy=None)
assert_that(cloned.id, equal_to('sv_5556'))
@patch_rest
def test_thin_clone_limit_exceeded(self):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
def _inner():
TCHelper.thin_clone(lun._cli, lun,
name='test_thin_clone_limit_exceeded',
description='This is description.',
io_limit_policy=None)
assert_that(_inner, raises(UnityThinCloneLimitExceededError))
@patch_rest
def test_notify_dd_copy(self):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
copied_lun = UnityLun.get(_id='sv_3', cli=t_rest(version='4.2.0'))
TCHelper.notify(lun, ThinCloneActionEnum.DD_COPY, copied_lun)
self.assertTrue(lun.get_id() in TCHelper._tc_cache)
self.assertEqual(copied_lun, TCHelper._tc_cache[lun.get_id()])
self.assertFalse(lun.get_id() in TCHelper._gc_candidates)
@patch_rest
def test_notify_dd_copy_gc_background(self):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
copied_lun = UnityLun.get(_id='sv_3', cli=t_rest(version='4.2.0'))
old_lun = UnityLun.get(_id='sv_4', cli=t_rest(version='4.2.0'))
TCHelper._tc_cache[lun.get_id()] = old_lun
TCHelper.notify(lun, ThinCloneActionEnum.DD_COPY, copied_lun)
self.assertTrue(lun.get_id() in TCHelper._tc_cache)
self.assertEqual(copied_lun, TCHelper._tc_cache[lun.get_id()])
self.assertTrue(old_lun.get_id() in TCHelper._gc_candidates)
@mock.patch('storops.lib.thinclone_helper.TCHelper._gc_background.put')
@patch_rest
def test_notify_dd_copy_gc(self, mocked_put):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
copied_lun = UnityLun.get(_id='sv_3', cli=t_rest(version='4.2.0'))
old_lun = UnityLun.get(_id='sv_4', cli=t_rest(version='4.2.0'))
TCHelper._tc_cache[lun.get_id()] = old_lun
TCHelper.notify(lun, ThinCloneActionEnum.DD_COPY, copied_lun)
self.assertTrue(lun.get_id() in TCHelper._tc_cache)
self.assertEqual(copied_lun, TCHelper._tc_cache[lun.get_id()])
self.assertTrue(old_lun.get_id() in TCHelper._gc_candidates)
mocked_put.assert_called_with(TCHelper._delete_base_lun,
base_lun=old_lun)
@patch_rest
def test_notify_lun_attach(self):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
TCHelper.notify(lun, ThinCloneActionEnum.LUN_ATTACH)
self.assertFalse(lun.get_id() in TCHelper._tc_cache)
self.assertFalse(lun.get_id() in TCHelper._gc_candidates)
@mock.patch('storops.lib.thinclone_helper.TCHelper._gc_background.put')
@patch_rest
def test_notify_lun_attach_gc(self, mocked_put):
lun = UnityLun.get(_id='sv_2', cli=t_rest(version='4.2.0'))
old_lun = UnityLun.get(_id='sv_4', cli=t_rest(version='4.2.0'))
TCHelper._tc_cache[lun.get_id()] = old_lun
TCHelper.notify(lun, ThinCloneActionEnum.LUN_ATTACH)
self.assertFalse(lun.get_id() in TCHelper._tc_cache)
self.assertTrue(old_lun.get_id() in TCHelper._gc_candidates)
mocked_put.assert_called_with(TCHelper._delete_base_lun,
base_lun=old_lun)
@mock.patch('storops.unity.resource.lun.UnityLun.delete')
@patch_rest
def test_notify_tc_delete_base_lun_still_using(self, lun_delete):
lun = UnityLun.get(_id='sv_5600', cli=t_rest(version='4.2.0'))
TCHelper.notify(lun, ThinCloneActionEnum.TC_DELETE)
self.assertFalse(lun.get_id() in TCHelper._tc_cache)
self.assertFalse(lun.get_id() in TCHelper._gc_candidates)
lun_delete.assert_not_called()
@mock.patch('storops.lib.thinclone_helper.TCHelper._gc_background.put')
@mock.patch('storops.unity.resource.lun.UnityLun.delete')
@patch_rest
def test_notify_tc_delete_base_lun_having_thinclone(self, mocked_put,
lun_delete):
lun = UnityLun.get(_id='sv_5602', cli=t_rest(version='4.2.0'))
base_lun = UnityLun.get(_id='sv_5603', cli=t_rest(version='4.2.0'))
TCHelper._gc_candidates[base_lun.get_id()] = base_lun.get_id()
TCHelper.notify(lun, ThinCloneActionEnum.TC_DELETE)
self.assertFalse(lun.get_id() in TCHelper._tc_cache)
self.assertFalse(lun.get_id() in TCHelper._gc_candidates)
self.assertTrue(base_lun.get_id() in TCHelper._gc_candidates)
lun_delete.assert_not_called()
@patch_rest
def test_notify_tc_delete_base_lun_snap_under_destroying(self):
lun = UnityLun.get(_id='sv_5606', cli=t_rest(version='4.2.0'))
base_lun = UnityLun.get(_id='sv_5607', cli=t_rest(version='4.2.0'))
TCHelper._gc_candidates[base_lun.get_id()] = base_lun.get_id()
TCHelper.notify(lun, ThinCloneActionEnum.TC_DELETE)
self.assertFalse(lun.get_id() in TCHelper._tc_cache)
self.assertFalse(lun.get_id() in TCHelper._gc_candidates)
self.assertTrue(base_lun.get_id() in TCHelper._gc_candidates)
@mock.patch('storops.unity.resource.lun.UnityLun.delete')
@patch_rest
def test_notify_tc_delete_base_lun_ready_for_gc(self, lun_delete):
lun = UnityLun.get(_id='sv_5600', cli=t_rest(version='4.2.0'))
base_lun = UnityLun.get(_id='sv_5601', cli=t_rest(version='4.2.0'))
TCHelper._gc_candidates[base_lun.get_id()] = base_lun.get_id()
TCHelper.notify(lun, ThinCloneActionEnum.TC_DELETE)
self.assertFalse(lun.get_id() in TCHelper._tc_cache)
self.assertFalse(lun.get_id() in TCHelper._gc_candidates)
self.assertFalse(base_lun.get_id() in TCHelper._gc_candidates)
lun_delete.assert_called_once()
@mock.patch('storops.lib.thinclone_helper.TCHelper._gc_background.put')
@patch_rest
def test_notify_tc_delete_base_lun(self, mocked_put):
base_lun = UnityLun.get(_id='sv_5608', cli=t_rest(version='4.2.0'))
TCHelper.notify(base_lun, ThinCloneActionEnum.BASE_LUN_DELETE)
self.assertTrue(base_lun.get_id() in TCHelper._gc_candidates)
mocked_put.assert_called_with(TCHelper._delete_base_lun,
base_lun=base_lun)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.texttospeech.v1beta1 TextToSpeech API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.texttospeech_v1beta1.gapic import enums
from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client_config
from google.cloud.texttospeech_v1beta1.gapic.transports import (
text_to_speech_grpc_transport,
)
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-texttospeech"
).version
class TextToSpeechClient(object):
"""Service that implements Google Cloud Text-to-Speech API."""
SERVICE_ADDRESS = "texttospeech.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.texttospeech.v1beta1.TextToSpeech"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TextToSpeechClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.TextToSpeechGrpcTransport,
Callable[[~.Credentials, type], ~.TextToSpeechGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = text_to_speech_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=text_to_speech_grpc_transport.TextToSpeechGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = text_to_speech_grpc_transport.TextToSpeechGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_voices(
self,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of Voice supported for synthesis.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> response = client.list_voices()
Args:
language_code (str): Optional. Recommended.
`BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag.
If specified, the ListVoices call will only return voices that can be
used to synthesize this language\_code. E.g. when specifying "en-NZ",
you will get supported "en-*" voices; when specifying "no", you will get
supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices;
specifying "zh" will also get supported "cmn-*" voices; specifying
"zh-hk" will also get supported "yue-\*" voices.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_voices" not in self._inner_api_calls:
self._inner_api_calls[
"list_voices"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_voices,
default_retry=self._method_configs["ListVoices"].retry,
default_timeout=self._method_configs["ListVoices"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
return self._inner_api_calls["list_voices"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def synthesize_speech(
self,
input_,
voice,
audio_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Synthesizes speech synchronously: receive results after all text input
has been processed.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> # TODO: Initialize `input_`:
>>> input_ = {}
>>>
>>> # TODO: Initialize `voice`:
>>> voice = {}
>>>
>>> # TODO: Initialize `audio_config`:
>>> audio_config = {}
>>>
>>> response = client.synthesize_speech(input_, voice, audio_config)
Args:
input_ (Union[dict, ~google.cloud.texttospeech_v1beta1.types.SynthesisInput]): Required. The Synthesizer requires either plain text or SSML as input.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.SynthesisInput`
voice (Union[dict, ~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams]): Required. The desired voice of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams`
audio_config (Union[dict, ~google.cloud.texttospeech_v1beta1.types.AudioConfig]): Required. The configuration of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.AudioConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.SynthesizeSpeechResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "synthesize_speech" not in self._inner_api_calls:
self._inner_api_calls[
"synthesize_speech"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.synthesize_speech,
default_retry=self._method_configs["SynthesizeSpeech"].retry,
default_timeout=self._method_configs["SynthesizeSpeech"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.SynthesizeSpeechRequest(
input=input_, voice=voice, audio_config=audio_config
)
return self._inner_api_calls["synthesize_speech"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
|
# Tests for Blt widgets.
import os
import Tkinter
import Test
import Pmw
Test.initialise()
testData = ()
# Blt vector type
def _setVectorItem(index, value):
w = Test.currentWidget()
w[index] = value
def _getVectorItem(index):
w = Test.currentWidget()
return w[index]
def _getVectorSlice(index1, index2):
w = Test.currentWidget()
return w[index1:index2]
def _delVectorItem(index):
w = Test.currentWidget()
del w[index]
def _vectorExpr(instanceMethod):
w = Test.currentWidget()
name = '::' + str(w)
if instanceMethod:
w.expr(name + '+ 0.5')
else:
return Pmw.Blt.vector_expr(name + '* 2')
def _vectorNames():
name = '::' + str(Test.currentWidget())
names = Pmw.Blt.vector_names()
if name not in names:
return names
name2 = Pmw.Blt.vector_names(name)
if name2 != (name,):
return name2
return None
if Test.haveBlt():
c = Pmw.Blt.Vector
tests = (
(c.set, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
(c.__repr__, (), '[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]'),
(c.set, ((1, 2, 3, 4, 5, 6, 7, 8, 9, 10),)),
(c.__repr__, (), '[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]'),
(c.__str__, (), 'PY_VEC4'),
(_getVectorItem, 7, 8),
(_getVectorSlice, (3, 6), [4.0, 5.0, 6.0]),
(_delVectorItem, 9),
(c.get, (), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
(c.append, 10),
(c.get, (), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]),
(c.length, (), 10),
(c.append, 5),
(c.__len__, (), 11),
(c.count, 5, 2),
(c.count, 20, 0),
(c.search, 5, (4, 10)),
(c.search, 20),
(c.index, 5, 4),
# Commented tests do not work because of a bug in the blt vector command.
# (c.clear, ()),
(_getVectorItem, 4, 5),
#(c.remove, 5), # This causes a core in blt 2.4 under Solaris 2.5
#(c.index, 5, 9),
(c.min, (), 1.0),
(c.max, (), 10.0),
# (c.reverse, ()),
# (c.reverse, ()),
# (c.get, (), [1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 9.0, 10.0, 5.0]),
# (c.insert, (3, 66)),
# (c.search, 66, (3,)),
(c.get, (), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 5.0]),
(c.blt_sort, ()),
(c.get, (), [1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]),
(c.blt_sort_reverse, ()),
(c.get, (), [10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 5.0, 4.0, 3.0, 2.0, 1.0]),
(_setVectorItem, (7, 77)),
(c.search, 77, (7,)),
(_setVectorItem, (2, 77)),
(c.search, 77, (2, 7)),
(_setVectorItem, (11, 77), 'TclError: can\'t set "PY_VEC4(11)": index "11" is out of range'),
(c.get, (), [10.0, 9.0, 77.0, 7.0, 6.0, 5.0, 5.0, 77.0, 3.0, 2.0, 1.0]),
(c.delete, (1, 3, 5)),
(c.get, (), [10.0, 77.0, 6.0, 5.0, 77.0, 3.0, 2.0, 1.0]),
(c.length, (), 8),
(c.length, (9), 9),
(c.get, (), [10.0, 77.0, 6.0, 5.0, 77.0, 3.0, 2.0, 1.0, 0.0]),
(c.range, (1, 3), [77.0, 6.0, 5.0]),
(_vectorExpr, 0, (20.0, 154.0, 12.0, 10.0, 154.0, 6.0, 4.0, 2.0, 0.0)),
(_vectorExpr, 1),
(c.get, (), [10.5, 77.5, 6.5, 5.5, 77.5, 3.5, 2.5, 1.5, 0.5]),
(_vectorNames, ()),
)
testData = testData + ((c, ((tests, {}),)),)
tests = (
(c.get, (), [0.0, 0.0, 0.0, 0.0]),
(c.length, (), 4),
)
testData = testData + ((c, ((tests, {'size' : 4}),)),)
tests = (
# (c.get, (), [0.0, 0.0, 0.0]), Does not work.
(c.length, (), 3),
(_getVectorItem, 2, 0),
(_getVectorItem, 4, 0),
(_getVectorItem, 5, 'IndexError: 5'),
)
testData = testData + ((c, ((tests, {'size' : '2:4'}),)),)
#=============================================================================
# Blt graph widget
def _axisCommand(graph, value):
return 'XX ' + value
def _createMarkerButton():
w = Test.currentWidget()
button = Tkinter.Button(w, text = 'This is\na button')
w.marker_create('window', coords=(10, 200), window=button)
def _axisNamesSorted(pattern = None):
w = Test.currentWidget()
if pattern is None:
names = list(w.axis_names())
else:
names = list(w.axis_names(pattern))
names.sort()
return tuple(names)
def _penNamesSorted(pattern = None):
w = Test.currentWidget()
if pattern is None:
names = list(w.pen_names())
else:
names = list(w.pen_names(pattern))
names.sort()
return tuple(names)
if Test.haveBlt():
c = Pmw.Blt.Graph
tests = (
('height', 700),
('width', 900),
(c.pack, (), {'fill': 'both', 'expand': 1}),
(Test.num_options, (), 43),
(c.pen_create, 'pen1', {'fill': 'green', 'symbol': 'circle'}),
(c.line_create, 'line1', {'xdata': (1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 'ydata': (7, 2, 1, 4, 7, 3, 9, 3, 8, 5), 'pen': 'pen1',}),
(c.bar_create, 'bar1', {'xdata': Test.vector_x, 'ydata': Test.vector_y[0], 'foreground': 'blue'}),
(c.bar_create, 'bar2', {'xdata': Test.vector_x, 'ydata': Test.vector_y[1], 'foreground': 'magenta'}),
(c.line_create, 'line2', {'xdata': Test.vector_x, 'ydata': Test.vector_y[2], 'color': 'red'}),
(c.marker_create, 'text', {'coords': (25, 200), 'rotate': 45, 'text':
'This is\na marker', 'name': 'myMarker1'}, 'myMarker1'),
(c.marker_create, 'line', {'coords': (35, 120, 15, 280), 'linewidth': 4}, 'marker1'),
(c.marker_create, 'polygon', {'coords': (35, 40, 45, 40, 45, 120, 35, 120), 'linewidth': 4}, 'marker2'),
(c.marker_create, 'bitmap', {'coords': (25, 200), 'rotate': 45, 'bitmap': 'questhead'}, 'marker3'),
(_createMarkerButton, ()),
(c.marker_after, 'myMarker1'),
(c.marker_before, ('myMarker1', 'marker3')),
(c.marker_create, 'text', {'coords': (10, 10), 'text':
'Bye', 'name': 'myMarker2'}, 'myMarker2'),
(c.marker_names, 'my*', ('myMarker1', 'myMarker2')),
(c.marker_exists, 'myMarker2', 1),
(c.marker_delete, ('myMarker1', 'myMarker2', 'marker3')),
(c.marker_exists, 'myMarker2', 0),
(c.marker_names, (), ('marker1', 'marker2', 'marker4')),
(c.marker_type, 'marker1', 'line'),
(c.marker_cget, ('marker1', 'linewidth'), '4'),
(c.marker_cget, ('marker2', 'linewidth'), '4'),
(c.marker_configure, (('marker1', 'marker2'),), {'linewidth': 5}),
(c.marker_cget, ('marker1', 'linewidth'), '5'),
(c.marker_cget, ('marker2', 'linewidth'), '5'),
('background', '#ffdddd'),
('barmode', 'stacked'),
('barwidth', 0.5),
('borderwidth', 100),
('borderwidth', 10),
('barwidth', 0.9),
('bottommargin', 100),
('bufferelements', 1),
('cursor', 'watch'),
('font', Test.font['variable']),
('foreground', 'blue'),
('halo', 20),
('barmode', 'aligned'),
('invertxy', 1),
('justify', 'left'),
('leftmargin', 100),
('plotbackground', 'aquamarine'),
('plotborderwidth', 4),
('plotrelief', 'groove'),
('relief', 'ridge'),
('rightmargin', 100),
('takefocus', '0'),
('tile', Test.earthris),
('barmode', 'infront'),
('title', 'Hello there\nmy little lovely'),
('topmargin', 100),
('invertxy', 0),
# Change colours so that axis and legend are visible against image tile.
(c.xaxis_configure, (), {'color': 'green'}),
(c.yaxis_configure, (), {'color': 'green'}),
(c.legend_configure, (), {'background': '#ffffcc'}),
(c.axis_cget, ('x', 'color'), 'green'),
(c.axis_configure, ('x2'), {'color': 'red'}),
(c.axis_cget, ('x2', 'color'), 'red'),
(c.axis_create, 'myaxis', {'rotate': 45}),
(c.axis_cget, ('myaxis', 'rotate'), '45.0'),
(_axisNamesSorted, (), ('myaxis', 'x', 'x2', 'y', 'y2')),
(_axisNamesSorted, ('*x*'), ('myaxis', 'x', 'x2')),
# Blt 2.4u returns the empty string for the axis use command
# (c.y2axis_use, 'myaxis', 'myaxis'),
(c.axis_delete, 'myaxis'),
(c.extents, 'leftmargin', 100),
(c.inside, (1000, 1000), 0),
(c.inside, (400, 400), 1),
(c.snap, Test.emptyimage),
(c.element_bind, ('line1', '<1>', Test.callback), Test.callback),
(c.element_bind, 'line1', ('<Button-1>',)),
(c.legend_bind, ('line1', '<1>', Test.callback), Test.callback),
(c.legend_bind, 'line1', ('<Button-1>',)),
(c.marker_bind, ('marker1', '<1>', Test.callback), Test.callback),
(c.marker_bind, 'marker1', ('<Button-1>',)),
(c.pen_create, 'mypen', {'type' : 'bar', 'foreground': 'red'}),
(c.pen_cget, ('mypen', 'foreground'), 'red'),
(c.pen_configure, ('mypen'), {'foreground': 'green'}),
(c.pen_cget, ('mypen', 'foreground'), 'green'),
(_penNamesSorted, (), ('activeBar', 'activeLine', 'mypen', 'pen1')),
(_penNamesSorted, ('*pen*'), ('mypen', 'pen1')),
(c.pen_delete, 'mypen'),
# These tests are not portable
# (c.invtransform, (0, 0), (-10.2518, 507.203)),
# (c.transform, (-10.2518, 507.203), (0.0, 0.0)),
# Reset margins to automatic
('bottommargin', 0),
('leftmargin', 0),
('rightmargin', 0),
('topmargin', 0),
(c.crosshairs_configure, (), {'hide': 0}),
(c.crosshairs_configure, (), {'position': '@300,300'}),
(c.crosshairs_configure, (), {'color': 'seagreen4'}),
(c.crosshairs_toggle, ()),
(c.crosshairs_cget, 'hide', 1),
(c.crosshairs_configure, (), {'dashes': (4, 8, 8, 8)}),
(c.crosshairs_configure, (), {'linewidth': 4}),
(c.crosshairs_toggle, ()),
(c.crosshairs_cget, 'hide', 0),
(c.crosshairs_off, ()),
(c.crosshairs_cget, 'hide', 1),
(c.crosshairs_on, ()),
(c.crosshairs_cget, 'hide', 0),
# Blt 2.4u gives an error with this (looks like activeBar
# is same as activeLine):
# (c.pen_configure, 'activeBar', {'foreground': '#ffffaa'}),
(c.element_configure, 'bar2', {'foreground': '#ffffaa'}),
# Blt 2.4u segmentation faults around here, remove tests:
# (c.element_activate, 'bar1'),
# (c.element_activate, 'bar2'),
# (c.element_deactivate, ('bar1', 'bar2')),
# (c.element_deactivate, ()),
# (c.element_activate, ('bar2',) + tuple(range(Test.vectorSize / 2))),
(c.element_configure, 'bar1', {'ydata': Test.vector_y[1]}),
(c.element_configure, 'bar2', {'ydata': Test.vector_y[0]}),
(c.element_cget, ('bar1', 'barwidth'), '0.0'),
(c.element_cget, ('bar2', 'barwidth'), '0.0'),
(c.element_configure, (('bar1', 'bar2'),), {'barwidth': 0.5}),
(c.element_cget, ('bar1', 'barwidth'), '0.5'),
(c.element_cget, ('bar2', 'barwidth'), '0.5'),
# These tests are not portable
# (c.element_closest, (330, 430), {}, {'x': 18.0, 'dist': 17.0, 'name': 'bar1', 'index': 18, 'y': 156.0}),
# (c.element_closest, (0, 0)),
# (c.element_closest, (0, 0), {'halo': 500}, {'x': 0.0, 'dist': 154.797, 'name': 'line2', 'index': 0, 'y': 359.0}),
# (c.element_closest, (0, 0), {'halo': 500, 'interpolate': 1}, {'x': -0.0320109, 'dist': 154.797, 'name': 'line2', 'index': 0, 'y': 358.879}),
(c.element_type, 'bar2', 'bar'),
(c.element_type, 'line2', 'line'),
(c.legend_activate, ('line1', 'bar2',)),
(c.legend_activate, ()),
(c.legend_deactivate, ('line1', 'bar2',)),
(c.legend_deactivate, ()),
(c.legend_configure, (), {'hide': 1}),
(c.legend_cget, 'hide', 1),
(c.legend_configure, (), {'hide': 0}),
(c.legend_configure, (), {'position': 'left', 'anchor': 'nw', 'ipadx': 100, 'ipady': 100}),
(c.legend_get, '@150,150', 'line1'),
# This probably works, but I haven't installed the prologue file
# (c.postscript_output, '/tmp/tmp.ps', {'landscape': 1}),
# (os.unlink, '/tmp/tmp.ps'),
(c.element_show, (), ('line2', 'bar2', 'bar1', 'line1')),
(c.element_show, (('line1', 'bar1'),), ('line1', 'bar1')),
(c.element_names, (), ('line1', 'line2', 'bar1', 'bar2')),
(c.element_names, 'line*', ('line1', 'line2')),
(c.element_show, (('line1', 'line2', 'bar1', 'bar2'),), ('line1', 'line2', 'bar1', 'bar2')),
(c.element_exists, 'bar1', 1),
(c.element_delete, ('bar1', 'bar2')),
(c.element_names, (), ('line1', 'line2')),
(c.element_exists, 'bar1', 0),
(c.element_delete, ()),
(c.element_names, (), ('line1', 'line2')),
(c.grid_configure, (), {'hide': 0}),
(c.grid_toggle, ()),
(c.grid_cget, 'hide', 1),
(c.grid_on, ()),
(c.grid_cget, 'hide', 0),
(c.grid_off, ()),
# These tests are not portable
# (c.xaxis_invtransform, 0, -37.1153),
# (c.axis_limits, 'x', (-0.98, 49.98)),
# (c.axis_transform, ('x', 0), 360),
# (c.axis_invtransform, ('y', 0), 444.198),
# (c.yaxis_limits, (), (-6.96, 406.96)),
# (c.axis_transform, ('y', 0), 620),
# (c.axis_invtransform, ('x2', 0), -25.1491),
# (c.axis_limits, 'x2', (-10.4, 10.4)),
# (c.x2axis_transform, 0, 598),
# (c.y2axis_invtransform, 0, 12.2713),
# (c.axis_limits, 'y2', (-10.4, 10.4)),
# (c.axis_transform, ('y2', 0), 341),
)
testData = testData + ((c, ((tests, {}),)),)
if __name__ == '__main__':
Test.runTests(testData)
|
|
"""
NSQ base reader class.
This receives messages from nsqd and calls task methods to process that message
It handles the logic for backing off on retries and giving up on a message
ex.
import nsq
all_tasks = {"task1": task1, "task2": task2}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['127.0.0.1:4161'],
topic="nsq_reader", channel="asdf", lookupd_poll_interval=15)
nsq.run()
"""
import logging
import os
import ujson as json
import time
import signal
import socket
import functools
import urllib
import tornado.options
import tornado.ioloop
import tornado.httpclient
import BackoffTimer
import nsq
import async
tornado.options.define('heartbeat_file', type=str, default=None, help="path to a file to touch for heartbeats")
class RequeueWithoutBackoff(Exception):
"""exception for requeueing a message without incrementing backoff"""
pass
class Reader(object):
def __init__(self, all_tasks, topic, channel,
nsqd_tcp_addresses=None, lookupd_http_addresses=None,
max_tries=5, max_in_flight=1, requeue_delay=90, lookupd_poll_interval=120,
preprocess_method=None, validate_method=None, async=False):
"""
Reader provides a loop that calls each task provided by ``all_tasks`` up to ``max_tries``
requeueing on any failures with increasing multiples of ``requeue_delay`` between subsequent
tries of each message.
``preprocess_method`` defines an optional method that can alter the message data before
other task functions are called.
``validate_method`` defines an optional method that returns a boolean as to weather or not
this message should be processed.
``all_tasks`` defines the a mapping of tasks and functions that individually will be called
with the message data.
``async`` determines whether handlers will do asynchronous processing. If set to True, handlers
must accept a keyword argument called "finisher" that will be a callable used to signal message
completion, with a boolean argument indicating success
"""
assert isinstance(all_tasks, dict)
for key, method in all_tasks.items():
assert callable(method), "key %s must have a callable value" % key
if preprocess_method:
assert callable(preprocess_method)
if validate_method:
assert callable(validate_method)
assert isinstance(topic, (str, unicode)) and len(topic) > 0
assert isinstance(channel, (str, unicode)) and len(channel) > 0
assert isinstance(max_in_flight, int) and 0 < max_in_flight < 2500
if nsqd_tcp_addresses:
if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
assert isinstance(nsqd_tcp_addresses, (str, unicode))
nsqd_tcp_addresses = [nsqd_tcp_addresses]
else:
nsqd_tcp_addresses = []
if lookupd_http_addresses:
if not isinstance(lookupd_http_addresses, (list, set, tuple)):
assert isinstance(lookupd_http_addresses, (str, unicode))
lookupd_http_addresses = [lookupd_http_addresses]
else:
lookupd_http_addresses = []
assert nsqd_tcp_addresses or lookupd_http_addresses
self.topic = topic
self.channel = channel
self.nsqd_tcp_addresses = nsqd_tcp_addresses
self.lookupd_http_addresses = lookupd_http_addresses
self.requeue_delay = int(requeue_delay * 1000)
self.max_tries = max_tries
self.max_in_flight = max_in_flight
self.lookupd_poll_interval = lookupd_poll_interval
self.async=async
self.task_lookup = all_tasks
self.preprocess_method = preprocess_method
self.validate_method = validate_method
self.backoff_timer = dict((k, BackoffTimer.BackoffTimer(0, 120)) for k in self.task_lookup.keys())
self.hostname = socket.gethostname()
self.short_hostname = self.hostname.split('.')[0]
self.conns = {}
self.http_client = tornado.httpclient.AsyncHTTPClient()
logging.info("starting reader for topic '%s'..." % self.topic)
for task in self.task_lookup:
for addr in self.nsqd_tcp_addresses:
address, port = addr.split(':')
self.connect_to_nsqd(address, int(port), task)
# trigger the first one manually
self.query_lookupd()
tornado.ioloop.PeriodicCallback(self.query_lookupd, self.lookupd_poll_interval * 1000).start()
def callback(self, conn, task, message):
body = message.body
try:
if self.preprocess_method:
body = self.preprocess_method(body)
if self.validate_method and not self.validate_method(body):
return self.finish(conn, message.id)
except Exception:
logging.exception('[%s] caught exception while preprocessing' % conn)
return self.requeue(conn, message)
method_callback = self.task_lookup[task]
try:
if self.async:
# this handler accepts the finisher callable as a keyword arg
finisher = functools.partial(self._client_callback, message=message, task=task, conn=conn)
return method_callback(body, finisher=finisher)
else:
# this is an old-school sync handler, give it just the message
if method_callback(body):
self.backoff_timer[task].success()
return self.finish(conn, message.id)
self.backoff_timer[task].failure()
except RequeueWithoutBackoff:
logging.info('RequeueWithoutBackoff')
except Exception:
logging.exception('[%s] caught exception while handling %s' % (conn, task))
self.backoff_timer[task].failure()
return self.requeue(conn, message)
def _client_callback(self, success, message=None, task=None, conn=None):
'''
This is the method that an asynchronous nsqreader should call to indicate
async completion of a message. This will most likely be exposed as the finisher
callable created in `callback` above with some functools voodoo
'''
if success:
self.backoff_timer[task].success()
self.finish(conn, message.id)
else:
self.backoff_timer[task].failure()
self.requeue(conn, message)
def requeue(self, conn, message, delay=True):
if message.attempts > self.max_tries:
logging.warning('[%s] giving up on message after max tries %s' % (conn, str(message.body)))
return self.finish(conn, message.id)
try:
# ms
requeue_delay = self.requeue_delay * message.attempts if delay else 0
conn.send(nsq.requeue(message.id, str(requeue_delay)))
except Exception:
conn.close()
logging.exception('[%s] failed to send requeue %s @ %d' % (conn, message.id, requeue_delay))
def finish(self, conn, message_id):
'''
This is an internal method for NSQReader
'''
try:
conn.send(nsq.finish(message_id))
except Exception:
conn.close()
logging.exception('[%s] failed to send finish %s' % (conn, message_id))
def connection_max_in_flight(self):
return max(1, self.max_in_flight / max(1, len(self.conns)))
def handle_message(self, conn, task, message):
conn.ready -= 1
# update ready count if necessary...
# if we're in a backoff state for this task
# set a timer to actually send the ready update
per_conn = self.connection_max_in_flight()
if not conn.is_sending_ready and (conn.ready <= 1 or conn.ready < int(per_conn * 0.25)):
backoff_interval = self.backoff_timer[task].get_interval()
if backoff_interval > 0:
conn.is_sending_ready = True
logging.info('[%s] backing off for %0.2f seconds' % (conn, backoff_interval))
send_ready_callback = functools.partial(self.send_ready, conn, per_conn)
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + backoff_interval, send_ready_callback)
else:
self.send_ready(conn, per_conn)
try:
message.body = json.loads(message.body)
except Exception:
logging.warning('[%s] invalid JSON: %s' % (conn, str(message.body)))
return
logging.info('[%s] handling %s for %s' % (conn, task, str(message.body)))
self.callback(conn, task, message)
def send_ready(self, conn, value):
try:
conn.send(nsq.ready(value))
conn.ready = value
except Exception:
conn.close()
logging.exception('[%s] failed to send ready' % conn)
conn.is_sending_ready = False
def update_heartbeat(self):
heartbeat_file = tornado.options.options.heartbeat_file
if not heartbeat_file:
return
try:
open(heartbeat_file, 'a').close()
os.utime(heartbeat_file, None)
except Exception:
logging.exception('failed touching heartbeat file')
def _data_callback(self, conn, raw_data, task):
frame, data = nsq.unpack_response(raw_data)
if frame == nsq.FRAME_TYPE_MESSAGE:
message = nsq.decode_message(data)
try:
self.handle_message(conn, task, message)
except Exception:
logging.exception('[%s] failed to handle_message() %r' % (conn, message))
elif frame == nsq.FRAME_TYPE_RESPONSE and data == "_heartbeat_":
self.update_heartbeat()
conn.send(nsq.nop())
def connect_to_nsqd(self, address, port, task):
assert isinstance(address, (str, unicode))
assert isinstance(port, int)
conn_id = address + ':' + str(port) + ':' + task
if conn_id in self.conns:
return
logging.info("[%s] connecting to nsqd for '%s'", address + ':' + str(port), task)
connect_callback = functools.partial(self._connect_callback, task=task)
data_callback = functools.partial(self._data_callback, task=task)
close_callback = functools.partial(self._close_callback, task=task)
conn = async.AsyncConn(address, port, connect_callback, data_callback, close_callback)
conn.connect()
self.conns[conn_id] = conn
def _connect_callback(self, conn, task):
if len(self.task_lookup) > 1:
channel = self.channel + '.' + task
else:
channel = self.channel
initial_ready = self.connection_max_in_flight()
try:
conn.send(nsq.subscribe(self.topic, channel, self.short_hostname, self.hostname))
conn.send(nsq.ready(initial_ready))
conn.ready = initial_ready
conn.is_sending_ready = False
except Exception:
conn.close()
logging.exception('[%s] failed to bootstrap connection' % conn)
def _close_callback(self, conn, task):
conn_id = str(conn) + ':' + task
if conn_id in self.conns:
del self.conns[conn_id]
if len(self.conns) == 0 and len(self.lookupd_http_addresses) == 0:
logging.warning("all connections closed and no lookupds... exiting")
tornado.ioloop.IOLoop.instance().stop()
def query_lookupd(self):
for endpoint in self.lookupd_http_addresses:
lookupd_url = endpoint + "/lookup?topic=" + urllib.quote(self.topic)
req = tornado.httpclient.HTTPRequest(lookupd_url, method="GET",
connect_timeout=1, request_timeout=2)
callback = functools.partial(self._finish_query_lookupd, endpoint=endpoint)
self.http_client.fetch(req, callback=callback)
def _finish_query_lookupd(self, response, endpoint):
if response.error:
logging.warning("[%s] lookupd error %s", endpoint, response.error)
return
try:
lookup_data = json.loads(response.body)
except json.JSONDecodeError:
logging.warning("[%s] failed to parse JSON from lookupd: %r", endpoint, response.body)
return
if lookup_data['status_code'] != 200:
logging.warning("[%s] lookupd responded with %d", endpoint, lookup_data['status_code'])
return
for task in self.task_lookup:
for producer in lookup_data['data']['producers']:
self.connect_to_nsqd(producer['address'], producer['tcp_port'], task)
def _handle_term_signal(sig_num, frame):
logging.info('TERM Signal handler called with signal %r' % sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
|
|
# Copyright (c) 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
from functools import reduce
class Graph(object):
"""A standard graph data structure.
With routines applicable to analysis of policy.
"""
class dfs_data(object):
"""Data for each node in graph during depth-first-search."""
def __init__(self, begin=None, end=None):
self.begin = begin
self.end = end
def __str__(self):
return "<begin: %s, end: %s>" % (self.begin, self.end)
class edge_data(object):
"""Data for each edge in graph."""
def __init__(self, node=None, label=None):
self.node = node
self.label = label
def __str__(self):
return "<Label:%s, Node:%s>" % (self.label, self.node)
def __eq__(self, other):
return self.node == other.node and self.label == other.label
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
def __init__(self, graph=None):
self.edges = {} # dict from node to list of nodes
self.nodes = {} # dict from node to info about node
self._cycles = None
def __or__(self, other):
# do this the simple way so that subclasses get this code for free
g = self.__class__()
for node in self.nodes:
g.add_node(node)
for node in other.nodes:
g.add_node(node)
for name in self.edges:
for edge in self.edges[name]:
g.add_edge(name, edge.node, label=edge.label)
for name in other.edges:
for edge in other.edges[name]:
g.add_edge(name, edge.node, label=edge.label)
return g
def __ior__(self, other):
if len(other) == 0:
# no changes if other is empty
return self
self._cycles = None
for name in other.nodes:
self.add_node(name)
for name in other.edges:
for edge in other.edges[name]:
self.add_edge(name, edge.node, label=edge.label)
return self
def __len__(self):
return (len(self.nodes) +
reduce(lambda x, y: x+y,
(len(x) for x in self.edges.values()),
0))
def add_node(self, val):
"""Add node VAL to graph."""
if val not in self.nodes: # preserve old node info
self.nodes[val] = None
return True
return False
def delete_node(self, val):
"""Delete node VAL from graph and all edges."""
try:
del self.nodes[val]
del self.edges[val]
except KeyError:
assert val not in self.edges
def add_edge(self, val1, val2, label=None):
"""Add edge from VAL1 to VAL2 with label LABEL to graph.
Also adds the nodes.
"""
self._cycles = None # so that has_cycles knows it needs to rerun
self.add_node(val1)
self.add_node(val2)
val = self.edge_data(node=val2, label=label)
try:
self.edges[val1].add(val)
except KeyError:
self.edges[val1] = set([val])
def delete_edge(self, val1, val2, label=None):
"""Delete edge from VAL1 to VAL2 with label LABEL.
LABEL must match (even if None). Does not delete nodes.
"""
try:
edge = self.edge_data(node=val2, label=label)
self.edges[val1].remove(edge)
except KeyError:
# KeyError either because val1 or edge
return
self._cycles = None
def node_in(self, val):
return val in self.nodes
def edge_in(self, val1, val2, label=None):
return (val1 in self.edges and
self.edge_data(val2, label) in self.edges[val1])
def reset_nodes(self):
for node in self.nodes:
self.nodes[node] = None
def depth_first_search(self, roots=None):
"""Run depth first search on the graph.
Also modify self.nodes, self.counter, and self.cycle.
Use all nodes if @roots param is None or unspecified
"""
self.reset()
if roots is None:
roots = self.nodes
for node in roots:
if node in self.nodes and self.nodes[node].begin is None:
self.dfs(node)
def _enumerate_cycles(self):
self.reset()
for node in self.nodes.keys():
self._reset_dfs_data()
self.dfs(node, target=node)
for path in self.__target_paths:
self._cycles.add(Cycle(path))
def reset(self, roots=None):
"""Return nodes to pristine state."""
self._reset_dfs_data()
roots = roots or self.nodes
self._cycles = set()
def _reset_dfs_data(self):
for node in self.nodes.keys():
self.nodes[node] = self.dfs_data()
self.counter = 0
self.__target_paths = []
def dfs(self, node, target=None, dfs_stack=None):
"""DFS implementation.
Assumes data structures have been properly prepared.
Creates start/begin times on nodes.
Adds paths from node to target to self.__target_paths
"""
if dfs_stack is None:
dfs_stack = []
dfs_stack.append(node)
if (target is not None and node == target and
len(dfs_stack) > 1): # non-trival path to target found
self.__target_paths.append(list(dfs_stack)) # record
if self.nodes[node].begin is None:
self.nodes[node].begin = self.next_counter()
if node in self.edges:
for edge in self.edges[node]:
self.dfs(edge.node, target=target, dfs_stack=dfs_stack)
self.nodes[node].end = self.next_counter()
dfs_stack.pop()
def stratification(self, labels):
"""Return the stratification result.
Return mapping of node name to integer indicating the
stratum to which that node is assigned. LABELS is the list
of edge labels that dictate a change in strata.
"""
stratum = {}
for node in self.nodes:
stratum[node] = 1
changes = True
while changes:
changes = False
for node in self.edges:
for edge in self.edges[node]:
oldp = stratum[node]
if edge.label in labels:
stratum[node] = max(stratum[node],
1 + stratum[edge.node])
else:
stratum[node] = max(stratum[node],
stratum[edge.node])
if oldp != stratum[node]:
changes = True
if stratum[node] > len(self.nodes):
return None
return stratum
def roots(self):
"""Return list of nodes with no incoming edges."""
possible_roots = set(self.nodes)
for node in self.edges:
for edge in self.edges[node]:
if edge.node in possible_roots:
possible_roots.remove(edge.node)
return possible_roots
def has_cycle(self):
"""Checks if there are cycles.
Run depth_first_search only if it has not already been run.
"""
if self._cycles is None:
self._enumerate_cycles()
return len(self._cycles) > 0
def cycles(self):
"""Return list of cycles. None indicates unknown. """
if self._cycles is None:
self._enumerate_cycles()
cycles_list = []
for cycle_graph in self._cycles:
cycles_list.append(cycle_graph.list_repr())
return cycles_list
def dependencies(self, node):
"""Returns collection of node names reachable from NODE.
If NODE does not exist in graph, returns None.
"""
if node not in self.nodes:
return None
self.reset()
node_obj = self.nodes[node]
if node_obj is None or node_obj.begin is None or node_obj.end is None:
self.depth_first_search([node])
node_obj = self.nodes[node]
return set([n for n, dfs_obj in self.nodes.items()
if dfs_obj.begin is not None])
def next_counter(self):
"""Return next counter value and increment the counter."""
self.counter += 1
return self.counter - 1
def __str__(self):
s = "{"
for node in self.nodes:
s += "(" + str(node) + " : ["
if node in self.edges:
s += ", ".join([str(x) for x in self.edges[node]])
s += "],\n"
s += "}"
return s
def _inverted_edge_graph(self):
"""create a shallow copy of self with the edges inverted"""
newGraph = Graph()
newGraph.nodes = self.nodes
for source_node in self.edges:
for edge in self.edges[source_node]:
try:
newGraph.edges[edge.node].add(Graph.edge_data(source_node))
except KeyError:
newGraph.edges[edge.node] = set(
[Graph.edge_data(source_node)])
return newGraph
def find_dependent_nodes(self, nodes):
"""Return all nodes dependent on @nodes.
Node T is dependent on node T.
Node T is dependent on node R if there is an edge from node S to T,
and S is dependent on R.
Note that node T is dependent on node T even if T is not in the graph
"""
return (self._inverted_edge_graph().find_reachable_nodes(nodes)
| set(nodes))
def find_reachable_nodes(self, roots):
"""Return all nodes reachable from @roots."""
if len(roots) == 0:
return set()
self.depth_first_search(roots)
result = [x for x in self.nodes if self.nodes[x].begin is not None]
self.reset_nodes()
return set(result)
class Cycle(frozenset):
"""An immutable set of 2-tuples to represent a directed cycle
Extends frozenset, adding a list_repr method to represent a cycle as an
ordered list of nodes.
The set representation facilicates identity of cycles regardless of order.
The list representation is much more readable.
"""
def __new__(cls, cycle):
edge_list = []
for i in range(1, len(cycle)):
edge_list.append((cycle[i - 1], cycle[i]))
new_obj = super(Cycle, cls).__new__(cls, edge_list)
new_obj.__list_repr = list(cycle) # save copy as list_repr
return new_obj
def list_repr(self):
"""Return list-of-nodes representation of cycle"""
return self.__list_repr
class BagGraph(Graph):
"""A graph data structure with bag semantics for nodes and edges.
Keeps track of the number of times each node/edge has been inserted.
A node/edge is removed from the graph only once it has been deleted
the same number of times it was inserted. Deletions when no node/edge
already exist are ignored.
"""
def __init__(self, graph=None):
super(BagGraph, self).__init__(graph)
self._node_refcounts = {} # dict from node to counter
self._edge_refcounts = {} # dict from edge to counter
def add_node(self, val):
"""Add node VAL to graph."""
super(BagGraph, self).add_node(val)
if val in self._node_refcounts:
self._node_refcounts[val] += 1
else:
self._node_refcounts[val] = 1
def delete_node(self, val):
"""Delete node VAL from graph (but leave all edges)."""
if val not in self._node_refcounts:
return
self._node_refcounts[val] -= 1
if self._node_refcounts[val] == 0:
super(BagGraph, self).delete_node(val)
del self._node_refcounts[val]
def add_edge(self, val1, val2, label=None):
"""Add edge from VAL1 to VAL2 with label LABEL to graph.
Also adds the nodes VAL1 and VAL2 (important for refcounting).
"""
super(BagGraph, self).add_edge(val1, val2, label=label)
edge = (val1, val2, label)
if edge in self._edge_refcounts:
self._edge_refcounts[edge] += 1
else:
self._edge_refcounts[edge] = 1
def delete_edge(self, val1, val2, label=None):
"""Delete edge from VAL1 to VAL2 with label LABEL.
LABEL must match (even if None). Also deletes nodes
whenever the edge exists.
"""
edge = (val1, val2, label)
if edge not in self._edge_refcounts:
return
self.delete_node(val1)
self.delete_node(val2)
self._edge_refcounts[edge] -= 1
if self._edge_refcounts[edge] == 0:
super(BagGraph, self).delete_edge(val1, val2, label=label)
del self._edge_refcounts[edge]
def node_in(self, val):
return val in self._node_refcounts
def edge_in(self, val1, val2, label=None):
return (val1, val2, label) in self._edge_refcounts
def node_count(self, node):
if node in self._node_refcounts:
return self._node_refcounts[node]
else:
return 0
def edge_count(self, val1, val2, label=None):
edge = (val1, val2, label)
if edge in self._edge_refcounts:
return self._edge_refcounts[edge]
else:
return 0
def __len__(self):
return (reduce(lambda x, y: x+y, self._node_refcounts.values(), 0) +
reduce(lambda x, y: x+y, self._edge_refcounts.values(), 0))
def __str__(self):
s = "{"
for node in self.nodes:
s += "(%s *%s: [" % (str(node), self._node_refcounts[node])
if node in self.edges:
s += ", ".join(
["%s *%d" %
(str(x), self.edge_count(node, x.node, x.label))
for x in self.edges[node]])
s += "],\n"
s += "}"
return s
class OrderedSet(collections.MutableSet):
"""Provide sequence capabilities with rapid membership checks.
Mostly lifted from the activestate recipe[1] linked at Python's collections
documentation[2]. Some modifications, such as returning True or False from
add(key) and discard(key) if a change is made.
[1] - http://code.activestate.com/recipes/576694/
[2] - https://docs.python.org/2/library/collections.html
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
return True
return False
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
return True
return False
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('pop from an empty set')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class iterstr(object):
"""Lazily provides informal string representation of iterables.
Calling __str__ directly on iterables returns a string containing the
formal representation of the elements. This class wraps the iterable and
instead returns the informal representation of the elements.
"""
def __init__(self, iterable):
self.iterable = iterable
self._str_interp = None
self._repr_interp = None
def __str__(self):
if self._str_interp is None:
self._str_interp = "[" + ";".join(map(str, self.iterable)) + "]"
return self._str_interp
def __repr__(self):
if self._repr_interp is None:
self._repr_interp = "[" + ";".join(map(repr, self.iterable)) + "]"
return self._repr_interp
|
|
import os
import wx
import traceback
from collections import OrderedDict
from threading import Thread
from csvI import modules
from parameters import *
from quickbooks import QuickBooks
from inteumI import Inteum
welcomeText = """\
Use this program to import expenses from a spreadsheet into QuickBooks.
Make sure QuickBooks is open and that you have loaded your company file.
Choose your spreadsheet with File > Open.
"""
class MainFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title)
self.textCtrl = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY)
self.textCtrl.SetValue(welcomeText)
self.Show(True)
self.CreateStatusBar()
fileMenu = wx.Menu()
self.fileOpen = fileMenu.Append(wx.ID_OPEN, '&Open', 'Import law firm expenses into QuickBooks')
self.fileExit = fileMenu.Append(wx.ID_EXIT, 'E&xit', 'Terminate the program')
menuBar = wx.MenuBar()
menuBar.Append(fileMenu, '&File')
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.on_fileOpen, self.fileOpen)
self.Bind(wx.EVT_MENU, self.on_fileExit, self.fileExit)
self.Show(True)
def on_fileOpen(self, e):
self.folderPath = ''
fileDialog = wx.FileDialog(self, 'Choose spreadsheet', self.folderPath, '', '*.csv', wx.OPEN)
if fileDialog.ShowModal() == wx.ID_OK:
self.folderPath = fileDialog.GetDirectory()
filePath = os.path.join(self.folderPath, fileDialog.GetFilename())
self.textCtrl.SetValue('Choose the law firm corresponding to the spreadsheet.')
lawFirmDialog = LawFirmDialog(None, 'Choose law firm')
if lawFirmDialog.ShowModal() == wx.ID_OK:
self.textCtrl.Clear()
self.fileOpen.Enable(False)
CoreThread(
lawFirmDialog.selectedModule,
filePath,
self.textCtrl.AppendText,
self.on_taskEnd,
).start()
else:
self.textCtrl.SetValue(welcomeText)
lawFirmDialog.Destroy()
fileDialog.Destroy()
def on_fileExit(self, e):
self.Close(True)
def on_taskEnd(self, isOk):
if isOk:
wx.MessageBox('Done.', 'Update complete')
else:
wx.MessageBox('Errors found', 'Update failed')
self.fileOpen.Enable(True)
class LawFirmDialog(wx.Dialog):
selectedModule = modules[0]
def __init__(self, parent, title):
wx.Dialog.__init__(self, parent, title=title)
panel = wx.Panel(self, -1)
x, y = 15, 15
self.buttonRadios = []
for moduleIndex, module in enumerate(modules):
if moduleIndex == 0:
options = dict(style=wx.RB_GROUP)
y += 25
buttonRadio = wx.RadioButton(panel, -1, module.lawFirmName.replace('&', '&&'), (x, y), **options)
buttonRadio.Bind(wx.EVT_RADIOBUTTON, self.on_buttonRadio)
self.buttonRadios.append(buttonRadio)
buttonOK = wx.Button(self, wx.ID_OK, 'OK')
buttonOK.Bind(wx.EVT_BUTTON, self.on_buttonOK)
buttonCancel = wx.Button(self, wx.ID_CANCEL, 'Cancel')
buttonCancel.Bind(wx.EVT_BUTTON, self.on_buttonCancel)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(buttonOK, 0, wx.RIGHT, 20)
subSizer.Add(buttonCancel, 0, wx.LEFT, 20)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(panel, 1, wx.EXPAND)
sizer.Add(subSizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_BOTTOM | wx.TOP | wx.BOTTOM, 10)
self.SetSizer(sizer)
def on_buttonRadio(self, e):
button = e.GetEventObject()
self.selectedModule = modules[self.buttonRadios.index(button)]
def on_buttonOK(self, e):
self.EndModal(wx.ID_OK)
def on_buttonCancel(self, e):
self.EndModal(wx.ID_CANCEL)
class CoreThread(Thread):
def __init__(self, module, filePath, show_text, signal_end):
super(CoreThread, self).__init__()
self.module = module
self.filePath = filePath
self.show_text = show_text
self.signal_end = signal_end
def summarize_candidatePacks(self, packs):
packCount = len(packs)
self.show_text('%i candidate%s\n' % (packCount, 's' if packCount != 1 else ''))
def summarize_mismatches(self, mismatches):
mismatchCount = len(mismatches)
self.show_text('%i mismatch%s\n' % (mismatchCount, 'es' if mismatchCount != 1 else ''))
def summarize_newPacks(self, packs):
packCount = len(packs)
self.show_text('%i new\n' % packCount)
def show_error(self, error):
self.show_text('%s\n' % error)
def prompt_update(self, pack, oldPack):
self.show_text('\nMismatch:\n')
self.show_text(str(pack) + '\n')
self.show_text(str(oldPack) + '\n')
return True
def prompt_save(self, newPacks, newResults):
self.show_text('Saving...\n')
return True
def run(self):
# try:
self.show_text('Connecting to Inteum... ')
inteum = Inteum(INTEUM_DSN)
self.show_text('OK\n')
self.show_text('Loading technologies... ')
technologies = inteum.get_technologies()
self.show_text('%s\n' % len(technologies))
self.show_text('Loading patents... ')
patents = inteum.get_patents()
self.show_text('%s\n' % len(patents))
self.show_text('Loading patentTypes... ')
patentTypes = inteum.get_patentTypes()
self.show_text('%s\n' % len(patentTypes))
self.show_text('Loading lawFirms... ')
lawFirms = inteum.get_lawFirms()
self.show_text('%s\n' % len(lawFirms))
self.show_text('Loading countries... ')
countries = inteum.get_countries()
self.show_text('%s\n' % len(countries))
self.show_text('Loading expenses from spreadsheet... ')
qbr = self.module(technologies, patents, patentTypes, lawFirms, countries)
lawFirmExpenses = qbr.load_expenses(self.filePath)
self.show_text('%s\n' % len(lawFirmExpenses))
self.show_text('Connecting to QuickBooks... ')
qb = QuickBooks(applicationName=QUICKBOOKS_APPLICATION_NAME)
self.show_text('OK\n')
self.show_text('Updating vendors in QuickBooks using lawFirms from Inteum...\n')
qb.synchronize(lawFirms, 'Vendor', dict(
equal=qbr.equal_lawFirm,
parse_result=qbr.parse_vendor,
update_result=qbr.format_vendor,
format_result=qbr.format_vendor,
# expand_results=
# collapse_packs=
prompt_update=self.prompt_update,
prompt_save=self.prompt_save,
show_parse_error=self.show_error,
show_format_error=self.show_error,
summarize_candidatePacks=self.summarize_candidatePacks,
summarize_mismatches=self.summarize_mismatches,
summarize_newPacks=self.summarize_newPacks,
))
self.show_text('Updating customers in QuickBooks using technologies from Inteum...\n')
qb.synchronize(technologies, 'Customer', dict(
equal=qbr.equal_technology,
parse_result=qbr.parse_customer,
update_result=qbr.format_customer,
format_result=qbr.format_customer,
# expand_results=
# collapse_packs=
prompt_update=self.prompt_update,
prompt_save=self.prompt_save,
show_parse_error=self.show_error,
show_format_error=self.show_error,
summarize_candidatePacks=self.summarize_candidatePacks,
summarize_mismatches=self.summarize_mismatches,
summarize_newPacks=self.summarize_newPacks,
))
self.show_text('Updating jobs in QuickBooks using patents from Inteum...\n')
qb.synchronize(patents, 'Customer', dict(
equal=qbr.equal_patent,
parse_result=qbr.parse_job,
update_result=qbr.format_job,
format_result=qbr.format_job,
# expand_results=
# collapse_packs=
prompt_update=self.prompt_update,
prompt_save=self.prompt_save,
show_parse_error=self.show_error,
show_format_error=self.show_error,
summarize_candidatePacks=self.summarize_candidatePacks,
summarize_mismatches=self.summarize_mismatches,
summarize_newPacks=self.summarize_newPacks,
))
self.show_text('Updating expense accounts in QuickBooks...\n')
qb.synchronize([{'name': '6100 - Patent Related Expenses'}], 'Account', dict(
equal=lambda account1, account2: account1['name'].lower() == account2['name'].lower(),
parse_result=lambda result: {'name': result['FullName']},
# update_result=,
format_result=lambda account, show_format_error: OrderedDict([('Name', account['name']), ('AccountType', 'Expense')]),
# expand_results=,
# collapse_packs=,
# prompt_update=,
prompt_save=self.prompt_save,
show_parse_error=self.show_error,
show_format_error=self.show_error,
summarize_candidatePacks=self.summarize_candidatePacks,
summarize_mismatches=self.summarize_mismatches,
summarize_newPacks=self.summarize_newPacks,
))
self.show_text('Updating expenses in QuickBooks using expenses from spreadsheet...\n')
qb.synchronize(lawFirmExpenses, 'Bill', dict(
equal=qbr.equal_expense,
parse_result=qbr.parse_bill,
update_result=qbr.update_bill,
format_result=qbr.format_bill,
expand_results=qbr.expand_bills,
collapse_packs=qbr.collapse_expenses,
prompt_update=self.prompt_update,
prompt_save=self.prompt_save,
show_parse_error=self.show_error,
show_format_error=self.show_error,
summarize_candidatePacks=self.summarize_candidatePacks,
summarize_mismatches=self.summarize_mismatches,
summarize_newPacks=self.summarize_newPacks,
), {'IncludeLineItems': 1})
# except Exception, error:
# self.show_text('\n' + traceback.format_exc() + '\n')
# self.show_text('Failed.')
# self.signal_end(isOk=False)
# else:
# self.show_text('Done.')
# self.signal_end(isOk=True)
if __name__ == '__main__':
app = wx.App(False)
frame = MainFrame(None, QUICKBOOKS_APPLICATION_NAME)
app.MainLoop()
|
|
#!/usr/bin/env python
# encoding: utf-8
import os
from types import NoneType
from xmlrpclib import DateTime
import mock
from nose.tools import *
from webtest_plus import TestApp
from tests.base import OsfTestCase
from tests.factories import (UserFactory, ProjectFactory, NodeFactory,
AuthFactory, PointerFactory, DashboardFactory, FolderFactory, RegistrationFactory)
from framework.auth import Auth
from website.util import rubeus, api_url_for
import website.app
from website.util.rubeus import sort_by_name
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID, \
ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME
app = website.app.init_app(
routes=True, set_backends=False, settings_module='website.settings'
)
class TestRubeus(OsfTestCase):
def setUp(self):
super(TestRubeus, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(user=self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('s3', self.consolidated_auth)
self.project.creator.add_addon('s3', self.consolidated_auth)
self.node_settings = self.project.get_addon('s3')
self.user_settings = self.project.creator.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.user_settings
self.node_settings.save()
def test_hgrid_dummy(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
# FIXME: These tests are very brittle.
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon S3: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
expected['permissions'] = permissions
actual = rubeus.build_addon_root(node_settings, node_settings.bucket, permissions=permissions)
assert actual['urls']['fetch']
assert actual['urls']['upload']
del actual['urls']
assert_equals(actual, expected)
def test_build_addon_root_has_correct_upload_limits(self):
self.node_settings.config.max_file_size = 10
self.node_settings.config.high_max_file_size = 20
node = self.project
user = self.project.creator
auth = Auth(user)
permissions = {
'view': node.can_view(auth),
'edit': node.can_edit(auth) and not node.is_registration,
}
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(result['accept']['maxSize'], self.node_settings.config.max_file_size)
# user now has elevated upload limit
user.system_tags.append('high_upload_limit')
user.save()
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(
result['accept']['maxSize'],
self.node_settings.config.high_max_file_size
)
def test_hgrid_dummy_fail(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
rv = {
'isPointer': False,
'addon': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon S3: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {
'fetch': node.api_url + 's3/hgrid/',
'upload': node.api_url + 's3/upload/'
},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_not_equals(rubeus.build_addon_root(
node_settings, node_settings.bucket, permissions=permissions), rv)
def test_hgrid_dummy_overrides(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon S3: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_equal(
rubeus.build_addon_root(
node_settings, node_settings.bucket,
permissions=permissions, urls={}
),
expected
)
def test_serialize_private_node(self):
user = UserFactory()
auth = Auth(user=user)
public = ProjectFactory.build(is_public=True)
# Add contributor with write permissions to avoid admin permission cascade
public.add_contributor(user, permissions=['read', 'write'])
public.save()
private = ProjectFactory(project=public, is_public=False)
NodeFactory(project=private)
collector = rubeus.NodeFileCollector(node=public, auth=auth)
private_dummy = collector._serialize_node(private)
assert_false(private_dummy['permissions']['edit'])
assert_false(private_dummy['permissions']['view'])
assert_equal(private_dummy['name'], 'Private Component')
assert_equal(len(private_dummy['children']), 0)
def test_collect_components_deleted(self):
node = NodeFactory(creator=self.project.creator, project=self.project)
node.is_deleted = True
collector = rubeus.NodeFileCollector(
self.project, Auth(user=UserFactory())
)
nodes = collector._collect_components(self.project, visited=[])
assert_equal(len(nodes), 0)
def test_serialized_pointer_has_flag_indicating_its_a_pointer(self):
pointer = PointerFactory()
serializer = rubeus.NodeFileCollector(node=pointer, auth=self.consolidated_auth)
ret = serializer._serialize_node(pointer)
assert_true(ret['isPointer'])
# TODO: Make this more reusable across test modules
mock_addon = mock.Mock()
serialized = {
'addon': 'mockaddon',
'name': 'Mock Addon',
'isAddonRoot': True,
'extra': '',
'permissions': {'view': True, 'edit': True},
'urls': {
'fetch': '/fetch',
'delete': '/delete'
}
}
mock_addon.config.get_hgrid_data.return_value = [serialized]
class TestSerializingNodeWithAddon(OsfTestCase):
def setUp(self):
super(TestSerializingNodeWithAddon, self).setUp()
self.auth = AuthFactory()
self.project = ProjectFactory(creator=self.auth.user)
self.project.get_addons = mock.Mock()
self.project.get_addons.return_value = [mock_addon]
self.serializer = rubeus.NodeFileCollector(node=self.project, auth=self.auth)
def test_collect_addons(self):
ret = self.serializer._collect_addons(self.project)
assert_equal(ret, [serialized])
def test_sort_by_name(self):
files = [
{'name': 'F.png'},
{'name': 'd.png'},
{'name': 'B.png'},
{'name': 'a.png'},
{'name': 'c.png'},
{'name': 'e.png'},
{'name': 'g.png'},
]
sorted_files = [
{'name': 'a.png'},
{'name': 'B.png'},
{'name': 'c.png'},
{'name': 'd.png'},
{'name': 'e.png'},
{'name': 'F.png'},
{'name': 'g.png'},
]
ret = sort_by_name(files)
for index, value in enumerate(ret):
assert_equal(value['name'], sorted_files[index]['name'])
def test_sort_by_name_none(self):
files = None
sorted_files = None
ret = sort_by_name(files)
assert_equal(ret, sorted_files)
def test_serialize_node(self):
ret = self.serializer._serialize_node(self.project)
assert_equal(
len(ret['children']),
len(self.project.get_addons.return_value) + len(self.project.nodes)
)
assert_equal(ret['kind'], rubeus.FOLDER)
assert_equal(ret['name'], 'Project: {0}'.format(self.project.title))
assert_equal(
ret['permissions'],
{
'view': True,
'edit': True,
}
)
assert_equal(
ret['urls'],
{
'upload': None,
'fetch': None,
},
)
def test_collect_js_recursive(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(project=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['bar.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('bar.js', result)
assert_in('baz.js', result)
def test_collect_js_unique(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(project=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['foo.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('baz.js', result)
class TestSerializingEmptyDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingEmptyDashboard, self).setUp()
self.dash = DashboardFactory()
self.auth = AuthFactory(user=self.dash.creator)
self.dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_empty_dashboard_hgrid_representation_is_list(self):
assert_is_instance(self.dash_hgrid, list)
def test_empty_dashboard_has_proper_number_of_smart_folders(self):
assert_equal(len(self.dash_hgrid), 2)
def test_empty_dashboard_smart_folders_have_correct_names_and_ids(self):
for node_hgrid in self.dash_hgrid:
assert_in(node_hgrid['name'], (ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME))
for node_hgrid in self.dash_hgrid:
if node_hgrid['name'] == ALL_MY_PROJECTS_ID:
assert_equal(node_hgrid['node_id'], ALL_MY_PROJECTS_ID)
elif node_hgrid['name'] == ALL_MY_REGISTRATIONS_ID:
assert_equal(node_hgrid['node_id'], ALL_MY_REGISTRATIONS_ID)
def test_empty_dashboard_smart_folders_are_empty(self):
for node_hgrid in self.dash_hgrid:
assert_equal(node_hgrid['children'], [])
def test_empty_dashboard_are_valid_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_folder(node)
def test_empty_dashboard_smart_folders_are_valid_smart_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_smart_folder(node)
class TestSerializingPopulatedDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingPopulatedDashboard, self).setUp()
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
self.init_dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_dashboard_adding_one_folder_increases_size_by_one(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
def test_dashboard_adding_one_folder_does_not_remove_smart_folders(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_true(
{ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME, folder.title} <=
{node_hgrid['name'] for node_hgrid in dash_hgrid}
)
def test_dashboard_adding_one_folder_increases_size_by_one_in_hgrid_representation(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
project = ProjectFactory(creator=self.user)
folder.add_pointer(project,self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
class TestSerializingFolders(OsfTestCase):
def setUp(self):
super(TestSerializingFolders, self).setUp()
self.user = UserFactory()
self.auth = AuthFactory(user=self.user)
def test_serialized_folder_is_valid_folder(self):
folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(folder, self.auth)
assert_equal(folder_hgrid, [])
def test_serialize_folder_containing_folder_increases_size_by_one(self):
outer_folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
inner_folder = FolderFactory(creator=self.user)
outer_folder.add_pointer(inner_folder, self.auth)
new_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
assert_equal(len(folder_hgrid) + 1, len(new_hgrid))
class TestSmartFolderViews(OsfTestCase):
def setUp(self):
super(TestSmartFolderViews, self).setUp()
self.app = TestApp(app)
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_project_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
with app.test_request_context():
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_PROJECTS_ID)
import pprint;pp = pprint.PrettyPrinter()
init_len = len(res.json[u'data'])
ProjectFactory(creator=self.user)
res = self.app.get(url + ALL_MY_PROJECTS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_registration_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
with app.test_request_context():
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
init_len = len(res.json[u'data'])
RegistrationFactory(creator=self.user)
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
def assert_valid_hgrid_folder(node_hgrid):
folder_types = {
'name': str,
'children': list,
'contributors': list,
'dateModified': (DateTime, NoneType),
'node_id': str,
'modifiedDelta': int,
'modifiedBy': (dict, NoneType),
'urls': dict,
'isDashboard': bool,
'expand': bool,
'permissions': dict,
'isSmartFolder': bool,
'childrenCount': int,
}
keys_types = {
'urls': (str, NoneType),
'permissions': bool,
}
folder_values = {
'parentIsFolder': True,
'isPointer': False,
'isFolder': True,
'kind': 'folder',
'type': 'smart-folder'
}
if isinstance(node_hgrid, list):
node_hgrid = node_hgrid[0]['data']
else:
assert_is_instance(node_hgrid, dict)
for key, correct_value in folder_values.items():
assert_equal(node_hgrid[key], correct_value)
for key, correct_type in folder_types.items():
assert_is_instance(node_hgrid[key], correct_type)
for key, correct_type in keys_types.items():
for inner_key, inner_value in node_hgrid[key].items():
assert_is_instance(inner_value, correct_type)
valid_keys = set(folder_types.keys()).union(folder_values.keys())
for key in node_hgrid.keys():
assert_in(key, valid_keys)
def assert_valid_hgrid_smart_folder(node_hgrid):
smart_folder_values = {
'contributors': [],
'isPointer': False,
'dateModified': None,
'modifiedDelta': 0,
'modifiedBy': None,
'isSmartFolder': True,
'urls': {
'upload': None,
'fetch': None
},
'isDashboard': False,
'permissions': {
'edit': False,
'acceptsDrops': False,
'copyable': False,
'movable': False,
'view': True
}
}
assert_valid_hgrid_folder(node_hgrid)
for attr, correct_value in smart_folder_values.items():
assert_equal(correct_value, node_hgrid[attr])
|
|
# # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import mock
import unittest
import os
# 3p
import simplejson as json
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest, Fixtures
from checks import AgentCheck
from utils.kubernetes.kubeutil import KubeUtil, detect_is_k8s
CPU = "CPU"
MEM = "MEM"
FS = "fs"
NET = "net"
NET_ERRORS = "net_errors"
DISK = "disk"
DISK_USAGE = "disk_usage"
PODS = "pods"
LIM = "limits"
REQ = "requests"
CAP = "capacity"
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
METRICS = [
('kubernetes.memory.usage', MEM),
('kubernetes.filesystem.usage', FS),
('kubernetes.filesystem.usage_pct', FS),
('kubernetes.cpu.usage.total', CPU),
('kubernetes.network.tx_bytes', NET),
('kubernetes.network.rx_bytes', NET),
('kubernetes.network_errors', NET_ERRORS),
('kubernetes.diskio.io_service_bytes.stats.total', DISK),
('kubernetes.filesystem.usage_pct', DISK_USAGE),
('kubernetes.filesystem.usage', DISK_USAGE),
('kubernetes.pods.running', PODS),
('kubernetes.cpu.limits', LIM),
('kubernetes.cpu.requests', REQ),
('kubernetes.cpu.capacity', CAP),
('kubernetes.memory.limits', LIM),
('kubernetes.memory.requests', REQ),
('kubernetes.memory.capacity', CAP),
]
class MockResponse:
"""
Helper class to mock a json response from requests
"""
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
class MockIterLinesResponse:
"""
Helper class to mock a text response from requests
"""
def __init__(self, lines_array, status_code):
self.lines_array = lines_array
self.status_code = status_code
def iter_lines(self):
for line in self.lines_array:
yield line
def KubeUtil_fake_retrieve_json_auth(url, timeout=10, params=None):
if url.endswith("/namespaces"):
return MockResponse(json.loads(Fixtures.read_file("namespaces.json", sdk_dir=FIXTURE_DIR, string_escape=False)), 200)
if url.endswith("/events"):
return MockResponse(json.loads(Fixtures.read_file("events.json", sdk_dir=FIXTURE_DIR, string_escape=False)), 200)
return {}
@attr(requires='kubernetes')
class TestKubernetes(AgentCheckTest):
CHECK_NAME = 'kubernetes'
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=lambda: json.loads(Fixtures.read_file("metrics_1.1.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_fail_1_1(self, *args):
# To avoid the disparition of some gauges during the second check
config = {
"instances": [{"host": "foo"}]
}
# Can't use run_check_twice due to specific metrics
self.run_check(config, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL, tags=None, count=1)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=lambda: json.loads(Fixtures.read_file("metrics_1.1.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_metrics_1_1(self, *args):
# To avoid the disparition of some gauges during the second check
mocks = {
'_perform_kubelet_checks': lambda x,y: None,
}
config = {
"instances": [
{
"host": "foo",
"enable_kubelet_checks": False
}
]
}
# Can't use run_check_twice due to specific metrics
self.run_check_twice(config, mocks=mocks, force_reload=True)
expected_tags = [
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_ef0ed5f9', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_POD.2688308a_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_295f14ff', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_etcd.2e44beff_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_e3e504ad', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_POD.e4cc795_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_49dd977d', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_skydns.1e752dc0_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_7c1345a1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_19879457', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_POD.3b46e8b9_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_209ed1dc', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_kube2sky.1afa6a47_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_624bc34c', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_45d1185b', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_5ad59bf3', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_haproxy.69b6303b_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_a35b9731', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'container_name:k8s_kube-ui.c17839c_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_d2b9aa90', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe','kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_9fe8b7b0', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8','kube_namespace:kube-system', 'container_name:k8s_healthz.4469a25d_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_241c34d1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion','kube_namespace:kube-system', 'container_name:k8s_fluentd-cloud-logging.7721935b_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_2c3c0879', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['container_name:dd-agent', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:l7-lb-controller', 'kube_namespace:kube-system'], [PODS]),
(['kube_replication_controller:redis-slave', 'kube_namespace:default'], [PODS]),
(['kube_replication_controller:frontend', 'kube_namespace:default'], [PODS]),
(['kube_namespace:kube-system'], [PODS]),
(['kube_replication_controller:heapster-v11', 'kube_namespace:kube-system'], [PODS]),
([], [LIM, REQ, CAP]) # container from kubernetes api doesn't have a corresponding entry in Cadvisor
]
for m, _type in METRICS:
for tags, types in expected_tags:
if _type in types:
self.assertMetric(m, count=1, tags=tags)
self.coverage_report()
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=lambda: json.loads(Fixtures.read_file("metrics_1.1.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_historate_1_1(self, *args):
# To avoid the disparition of some gauges during the second check
mocks = {
'_perform_kubelet_checks': lambda x,y: None,
}
config = {
"instances": [
{
"host": "foo",
"enable_kubelet_checks": False,
"use_histogram": True,
}
]
}
# Can't use run_check_twice due to specific metrics
self.run_check_twice(config, mocks=mocks, force_reload=True)
metric_suffix = ["count", "avg", "median", "max", "95percentile"]
expected_tags = [
(['pod_name:no_pod'], [MEM, CPU, NET, DISK, DISK_USAGE, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:l7-lb-controller', 'kube_namespace:kube-system'], [PODS]),
(['kube_replication_controller:redis-slave', 'kube_namespace:default'], [PODS]),
(['kube_replication_controller:frontend', 'kube_namespace:default'], [PODS]),
(['kube_replication_controller:heapster-v11', 'kube_namespace:kube-system'], [PODS]),
(['kube_namespace:kube-system'], [PODS]),
([], [LIM, REQ, CAP]) # container from kubernetes api doesn't have a corresponding entry in Cadvisor
]
for m, _type in METRICS:
for m_suffix in metric_suffix:
for tags, types in expected_tags:
if _type in types:
self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)
self.coverage_report()
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info',
side_effect=lambda: json.loads(Fixtures.read_file("machine_info_1.2.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=lambda: json.loads(Fixtures.read_file("metrics_1.2.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_fail_1_2(self, *args):
# To avoid the disparition of some gauges during the second check
config = {
"instances": [{"host": "foo"}]
}
# Can't use run_check_twice due to specific metrics
self.run_check(config, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info',
side_effect=lambda: json.loads(Fixtures.read_file("machine_info_1.2.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=lambda: json.loads(Fixtures.read_file("metrics_1.2.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_metrics_1_2(self, *args):
mocks = {
'_perform_kubelet_checks': lambda x,y: None,
}
config = {
"instances": [
{
"host": "foo",
"enable_kubelet_checks": False
}
]
}
# Can't use run_check_twice due to specific metrics
self.run_check_twice(config, mocks=mocks, force_reload=True)
expected_tags = [
(['container_name:k8s_POD.35220667_dd-agent-1rxlh_default_12c7be82-33ca-11e6-ac8f-42010af00003_f5cf585f',
'container_image:gcr.io/google_containers/pause:2.0', 'image_name:gcr.io/google_containers/pause',
'image_tag:2.0', 'pod_name:dd-agent-1rxlh', 'kube_namespace:default', 'kube_app:dd-agent',
'kube_foo:bar','kube_bar:baz', 'kube_replication_controller:dd-agent', 'kube_daemon_set:dd-agent', 'kube_container_name:POD'],
[MEM, CPU, FS, NET, NET_ERRORS]),
(['container_name:k8s_dd-agent.7b520f3f_dd-agent-1rxlh_default_12c7be82-33ca-11e6-ac8f-42010af00003_321fecb4',
'container_image:datadog/docker-dd-agent:massi_ingest_k8s_events', 'image_name:datadog/docker-dd-agent',
'image_tag:massi_ingest_k8s_events','pod_name:dd-agent-1rxlh',
'kube_namespace:default', 'kube_app:dd-agent', 'kube_foo:bar',
'kube_bar:baz', 'kube_replication_controller:dd-agent', 'kube_daemon_set:dd-agent', 'kube_container_name:dd-agent'], [LIM, REQ, MEM, CPU, NET, DISK, DISK_USAGE]),
(['kube_replication_controller:dd-agent', 'kube_namespace:default', 'kube_daemon_set:dd-agent'], [PODS]),
([], [LIM, REQ, CAP]) # container from kubernetes api doesn't have a corresponding entry in Cadvisor
]
for m, _type in METRICS:
for tags, types in expected_tags:
if _type in types:
self.assertMetric(m, count=1, tags=tags)
# Verify exact capacity values read from machine_info_1.2.json fixture.
self.assertMetric('kubernetes.cpu.capacity', value=2)
self.assertMetric('kubernetes.memory.capacity', value=8391204864)
self.coverage_report()
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info',
side_effect=lambda: json.loads(Fixtures.read_file("machine_info_1.2.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=lambda: json.loads(Fixtures.read_file("metrics_1.2.json", sdk_dir=FIXTURE_DIR)))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_historate_1_2(self, *args):
# To avoid the disparition of some gauges during the second check
mocks = {
'_perform_kubelet_checks': lambda x,y: None,
}
config = {
"instances": [
{
"host": "foo",
"enable_kubelet_checks": False,
"use_histogram": True,
}
]
}
# Can't use run_check_twice due to specific metrics
self.run_check_twice(config, mocks=mocks, force_reload=True)
metric_suffix = ["count", "avg", "median", "max", "95percentile"]
expected_tags = [
(['container_image:datadog/docker-dd-agent:massi_ingest_k8s_events', 'image_name:datadog/docker-dd-agent',
'image_tag:massi_ingest_k8s_events', 'pod_name:dd-agent-1rxlh',
'kube_namespace:default', 'kube_app:dd-agent', 'kube_foo:bar','kube_bar:baz',
'kube_replication_controller:dd-agent', 'kube_daemon_set:dd-agent', 'kube_container_name:dd-agent'], [MEM, CPU, NET, DISK, DISK_USAGE, LIM, REQ]),
(['container_image:gcr.io/google_containers/pause:2.0', 'image_name:gcr.io/google_containers/pause',
'image_tag:2.0', 'pod_name:dd-agent-1rxlh',
'kube_namespace:default', 'kube_app:dd-agent', 'kube_foo:bar','kube_bar:baz',
'kube_replication_controller:dd-agent', 'kube_daemon_set:dd-agent', 'kube_container_name:POD'], [MEM, CPU, NET, NET_ERRORS, DISK_USAGE]),
(['kube_replication_controller:dd-agent', 'kube_namespace:default', 'kube_daemon_set:dd-agent'], [PODS]),
([], [LIM, REQ, CAP]) # container from kubernetes api doesn't have a corresponding entry in Cadvisor
]
for m, _type in METRICS:
for m_suffix in metric_suffix:
for tags, types in expected_tags:
if _type in types:
self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)
self.coverage_report()
@mock.patch('utils.kubernetes.KubeUtil.get_node_info',
side_effect=lambda: ('Foo', 'Bar'))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth',
side_effect=KubeUtil_fake_retrieve_json_auth)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False)))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_events(self, *args):
# default value for collect_events is False
config = {'instances': [{'host': 'foo'}]}
self.run_check(config, force_reload=True)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=0, exact_match=False)
# again, with the feature enabled
config = {'instances': [{'host': 'bar', 'collect_events': True}]}
self.run_check(config, force_reload=True)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=1, exact_match=False)
# with no namespaces, only catch event from 'default'
self.assertEvent('dd-agent-a769 SuccessfulDelete on Bar', count=0, exact_match=False)
# again, now the timestamp is set and the event is discarded b/c too old
self.run_check(config)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=0, exact_match=False)
@mock.patch('utils.kubernetes.KubeUtil.get_node_info',
side_effect=lambda: ('Foo', 'Bar'))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth',
side_effect=KubeUtil_fake_retrieve_json_auth)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list')
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_namespaced_events(self, *args):
# reset last event pulling time
KubeUtil().last_event_collection_ts = 0
# Verify that we are retro compatible with the old 'namespace' configuration key
config = {'instances': [{'host': 'bar', 'collect_events': True, 'namespace': 'test-namespace-1'}]}
self.run_check(config, force_reload=True)
self.assertEvent('dd-agent-a769 SuccessfulDelete on Bar', count=1, exact_match=False)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=1, exact_match=False)
# reset last event pulling time
KubeUtil().last_event_collection_ts = 0
# Using 'namespaces' list
config = {'instances': [{'host': 'bar', 'collect_events': True, 'namespaces': ['test-namespace-1', 'test-namespace-2']}]}
self.run_check(config, force_reload=True)
self.assertEvent('dd-agent-a769 SuccessfulDelete on Bar', count=1, exact_match=False)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=0, exact_match=False)
# reset last event pulling time
KubeUtil().last_event_collection_ts = 0
# Using 'namespace_name_regexp' (since 'namespaces' is not set it should
# fallback to ['default'] and add any namespaces that matched with the regexp
config = {'instances': [{'host': 'bar', 'collect_events': True, 'namespace_name_regexp': 'test-namespace.*'}]}
self.run_check(config, force_reload=True)
self.assertEvent('dd-agent-a769 SuccessfulDelete on Bar', count=1, exact_match=False)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=1, exact_match=False)
# reset last event pulling time
KubeUtil().last_event_collection_ts = 0
# muting the 'default' namespace
config = {'instances': [{'host': 'bar', 'collect_events': True, 'namespaces': [], 'namespace_name_regexp': 'test-namespace.*'}]}
self.run_check(config, force_reload=True)
self.assertEvent('dd-agent-a769 SuccessfulDelete on Bar', count=1, exact_match=False)
self.assertEvent('hello-node-47289321-91tfd Scheduled on Bar', count=0, exact_match=False)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_kubelet_fail(self, *args):
# To avoid the disparition of some gauges during the second check
config = {
"instances": [{"host": "foo"}]
}
# Can't use run_check_twice due to specific metrics
self.run_check(config, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL, tags=None, count=1)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def test_fail_service_check_tagging(self, *args):
# To avoid the disparition of some gauges during the second check
config = {
"instances": [{"host": "foo", "tags":["tag:foo","tag:bar"]}]
}
# Can't use run_check_twice due to specific metrics
self.run_check(config, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL, tags=["tag:foo","tag:bar"], count=1)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet',
return_value='http://172.17.0.1:10255')
@mock.patch('utils.kubernetes.KubeUtil.perform_kubelet_query',
return_value=MockIterLinesResponse(["[+]ping ok","healthz check passed"], 200))
def test_ok_service_check_tagging(self, *args):
# To avoid the disparition of some gauges during the second check
config = {
"instances": [{"host": "foo", "tags":["tag:foo","tag:bar"]}]
}
# Can't use run_check_twice due to specific metrics
self.run_check(config, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.OK, tags=["tag:foo","tag:bar"], count=1)
self.assertServiceCheck("kubernetes.kubelet.check.ping", status=AgentCheck.OK, tags=["tag:foo","tag:bar"], count=1)
@mock.patch('utils.kubernetes.KubeUtil.retrieve_json_auth')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_machine_info')
@mock.patch('utils.kubernetes.KubeUtil.retrieve_metrics',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list',
side_effect=Exception("Connection error"))
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet',
return_value='http://172.17.0.1:10255')
@mock.patch('utils.kubernetes.KubeUtil.perform_kubelet_query',
return_value=MockIterLinesResponse(["[-]ping failed: reason withheld",
"healthz check failed"], 200))
def test_critical_service_check_tagging(self, *args):
# To avoid the disparition of some gauges during the second check
config = {
"instances": [{"host": "foo", "tags":["tag:foo","tag:bar"]}]
}
# Can't use run_check_twice due to specific metrics
self.run_check(config, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL, tags=["tag:foo","tag:bar"], count=1)
self.assertServiceCheck("kubernetes.kubelet.check.ping", status=AgentCheck.CRITICAL, tags=["tag:foo","tag:bar"], count=1)
@attr(requires='kubernetes')
class TestKubeutil(unittest.TestCase):
@mock.patch('utils.kubernetes.KubeUtil._locate_kubelet', return_value='http://172.17.0.1:10255')
def setUp(self, _locate_kubelet):
self.kubeutil = KubeUtil()
@mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list', side_effect=['foo'])
@mock.patch('utils.kubernetes.KubeUtil.extract_kube_pod_tags')
def test_get_kube_pod_tags(self, extract_kube_pod_tags, retrieve_pods_list):
self.kubeutil.get_kube_pod_tags(excluded_keys='bar')
retrieve_pods_list.assert_called_once()
extract_kube_pod_tags.assert_called_once_with('foo', excluded_keys='bar')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('utils.kubernetes.kubeutil.KubeUtil.get_auth_token', return_value='tkn')
def test_init_tls_settings(self, *args):
# kubelet
instances = [
# (instance, expected_result)
({}, {'kubelet_verify': True, 'bearer_token': 'tkn'}),
({'kubelet_tls_verify': False}, {'kubelet_verify': False, 'bearer_token': 'tkn'}),
({'kubelet_tls_verify': True}, {'kubelet_verify': True, 'bearer_token': 'tkn'}),
({'kubelet_tls_verify': 'foo.pem'}, {'kubelet_verify': 'foo.pem', 'bearer_token': 'tkn'}),
({'kubelet_cert': 'foo.pem'}, {'kubelet_verify': 'foo.pem', 'bearer_token': 'tkn'}),
({'kubelet_client_crt': 'client.crt', 'kubelet_client_key': 'client.key'},
{'kubelet_verify': True, 'kubelet_client_cert': ('client.crt', 'client.key'), 'bearer_token': 'tkn'}),
({'kubelet_tls_verify': True, 'kubelet_client_crt': 'client.crt'}, {'kubelet_verify': True, 'bearer_token': 'tkn'}),
({'kubelet_client_crt': 'client.crt'}, {'kubelet_verify': True, 'bearer_token': 'tkn'})
]
for instance, result in instances:
self.assertEqual(self.kubeutil._init_tls_settings(instance), result)
# apiserver
instance = {'apiserver_client_crt': 'foo.crt', 'apiserver_client_key': 'foo.key'}
expected_res = {'apiserver_client_cert': ('foo.crt', 'foo.key'), 'kubelet_verify': True, 'bearer_token': 'tkn'}
self.assertEqual(self.kubeutil._init_tls_settings(instance), expected_res)
with mock.patch('utils.kubernetes.kubeutil.os.path.exists', return_value=False):
self.assertEqual(self.kubeutil._init_tls_settings(instance), {'kubelet_verify': True, 'bearer_token': 'tkn'})
self.assertEqual(self.kubeutil._init_tls_settings(
{'apiserver_client_crt': 'foo.crt'}), {'kubelet_verify': True, 'bearer_token': 'tkn'})
##### Test _locate_kubelet #####
# we support connection to kubelet in 3 modes
# - no auth/no ssl --> over the --no-auth port
# - no auth/yes ssl (no verify) --> over the port used by apiserver if anonymous requests are accepted
# - yes auth/yes ssl (yes verify) --> same, but the user provided a way to verify kubelet's
# cert and we attach a bearer token if available
@mock.patch('utils.kubernetes.kubeutil.DockerUtil.get_hostname', return_value='test_docker_host')
def test_locate_kubelet_no_auth_no_ssl(self, _get_hostname):
no_auth_no_ssl_instances = [
# instance, expected_result
({}, 'http://test_docker_host:10255'),
({'host': 'test_explicit_host'}, 'http://test_explicit_host:10255'),
({'kubelet_port': '1337'}, 'http://test_docker_host:1337'),
({'host': 'test_explicit_host', 'kubelet_port': '1337'}, 'http://test_explicit_host:1337')
]
with mock.patch('utils.kubernetes.kubeutil.KubeUtil.perform_kubelet_query', return_value=True):
for instance, result in no_auth_no_ssl_instances:
self.assertEqual(self.kubeutil._locate_kubelet(instance), result)
@mock.patch('utils.kubernetes.kubeutil.DockerUtil.get_hostname', return_value='test_docker_host')
def test_locate_kubelet_no_auth_no_verify(self, _get_hostname):
no_auth_no_verify_instances = [
# instance, expected_result
({}, 'https://test_docker_host:10250'),
({'kubelet_port': '1337'}, 'https://test_docker_host:1337'),
({'host': 'test_explicit_host'}, 'https://test_explicit_host:10250'),
({'host': 'test_explicit_host', 'kubelet_port': '1337'}, 'https://test_explicit_host:1337'),
]
def side_effect(url):
"""Mock KubeUtil.perform_kubelet_query"""
if url.startswith('https://'):
return True
else:
raise Exception()
with mock.patch('utils.kubernetes.kubeutil.KubeUtil.perform_kubelet_query', side_effect=side_effect):
for instance, result in no_auth_no_verify_instances:
self.assertEqual(self.kubeutil._locate_kubelet(instance), result)
@mock.patch('utils.kubernetes.kubeutil.DockerUtil.get_hostname', return_value='test_docker_host')
@mock.patch('utils.kubernetes.kubeutil.KubeUtil.get_node_hostname', return_value='test_k8s_host')
@mock.patch('utils.kubernetes.kubeutil.KubeUtil.get_auth_token', return_value='foo')
def test_locate_kubelet_verify_and_auth(self, *args):
"""
Test kubelet connection with TLS. Also look for auth token.
"""
no_auth_instances = [
# instance, tls_settings, expected_result
(
{},
{'bearer_token': 'foo', 'kubelet_verify': True},
'https://test_k8s_host:10250'),
(
{'kubelet_port': '1337'},
{'bearer_token': 'foo', 'kubelet_verify': 'test.pem'},
'https://test_k8s_host:1337'),
(
{'host': 'test_explicit_host'},
{'bearer_token': 'foo', 'kubelet_verify': True, 'kubelet_client_cert': ('client.crt', 'client.key')},
'https://test_explicit_host:10250'
),
(
{'host': 'test_explicit_host', 'kubelet_port': '1337'},
{'bearer_token': 'foo', 'kubelet_verify': True},
'https://test_explicit_host:1337'
),
]
def side_effect(url, **kwargs):
"""Mock KubeUtil.perform_kubelet_query"""
if url.startswith('https://') and '10255' not in url:
return True
else:
raise Exception()
# no auth / TLS with verify
for instance, tls_settings, result in no_auth_instances:
with mock.patch('utils.kubernetes.kubeutil.requests') as req:
req.get = mock.MagicMock(side_effect=side_effect)
self.kubeutil.tls_settings = tls_settings
self.assertEqual(self.kubeutil._locate_kubelet(instance), result)
req.get.assert_called_with(result + '/healthz', # test endpoint
timeout=10,
verify=tls_settings.get('kubelet_verify', True),
headers={'Authorization': 'Bearer foo'} if 'kubelet_client_cert' not in tls_settings else None,
cert=tls_settings.get('kubelet_client_cert'),
params={'verbose': True}
)
@mock.patch('utils.kubernetes.kubeutil.KubeUtil.get_auth_token', return_value='foo')
def test_get_node_hostname(self, _get_auth_tkn):
node_lists = [
(json.loads(Fixtures.read_file('filtered_node_list_1_4.json', sdk_dir=FIXTURE_DIR, string_escape=False)), 'ip-10-0-0-179'),
({'items': [{'foo': 'bar'}]}, None),
({'items': []}, None),
({'items': [{'foo': 'bar'}, {'bar': 'foo'}]}, None)
]
for node_list, expected_result in node_lists:
with mock.patch('utils.kubernetes.kubeutil.KubeUtil.retrieve_json_auth', return_value=MockResponse(node_list, 200)):
self.assertEqual(self.kubeutil.get_node_hostname('ip-10-0-0-179'), expected_result)
def test_extract_kube_pod_tags(self):
"""
Test kube_pod_tags with both 1.1 and 1.2 version payloads
"""
res = self.kubeutil.extract_kube_pod_tags({}, ['foo'])
self.assertEqual(len(res), 0)
pods = json.loads(Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False))
res = self.kubeutil.extract_kube_pod_tags(pods, ['foo'])
labels = set(inn for out in res.values() for inn in out)
self.assertEqual(len(labels), 8 + 4)
res = self.kubeutil.extract_kube_pod_tags(pods, ['k8s-app'])
labels = set(inn for out in res.values() for inn in out)
self.assertEqual(len(labels), 6 + 4)
pods = json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False))
res = self.kubeutil.extract_kube_pod_tags(pods, ['foo'])
labels = set(inn for out in res.values() for inn in out)
self.assertEqual(len(labels), 3 + 1)
res = self.kubeutil.extract_kube_pod_tags(pods, ['k8s-app'])
labels = set(inn for out in res.values() for inn in out)
self.assertEqual(len(labels), 3 + 1)
@mock.patch('utils.kubernetes.kubeutil.KubeUtil.perform_kubelet_query')
def test_retrieve_pods_list(self, retrieve_pods):
self.kubeutil.retrieve_pods_list()
self.assertTrue(retrieve_pods.call_args_list[0].endswith('/pods/'))
@mock.patch('utils.kubernetes.kubeutil.retrieve_json')
def test_retrieve_machine_info(self, retrieve_json):
self.kubeutil.retrieve_machine_info()
retrieve_json.assert_called_once_with(self.kubeutil.machine_info_url)
@mock.patch('utils.kubernetes.kubeutil.retrieve_json')
def test_retrieve_metrics(self, retrieve_json):
self.kubeutil.retrieve_metrics()
retrieve_json.assert_called_once_with(self.kubeutil.metrics_url)
@mock.patch('utils.kubernetes.kubeutil.requests')
def test_perform_kubelet_query(self, req):
base_params = {'timeout': 10, 'verify': False,
'params': {'verbose': True}, 'cert': None, 'headers': None}
auth_token_header = {'headers': {'Authorization': 'Bearer foo'}}
verify_true = {'verify': True}
verify_cert = {'verify': 'kubelet.pem'}
client_cert = {'cert': ('client.crt', 'client.key')}
instances = [
('http://test.com', {'bearer_token': 'foo'}, dict(base_params.items() + verify_true.items())),
('https://test.com', {'bearer_token': 'foo'}, dict(base_params.items() + verify_true.items() + auth_token_header.items())),
('https://test.com', {'bearer_token': 'foo', 'kubelet_verify': True}, dict(base_params.items() + verify_true.items() + auth_token_header.items())),
('https://test.com', {'bearer_token': 'foo', 'kubelet_verify': 'kubelet.pem'}, dict(base_params.items() + verify_cert.items() + auth_token_header.items())),
('https://test.com', {'bearer_token': 'foo', 'kubelet_client_cert': ('client.crt', 'client.key')},
dict(base_params.items() + verify_true.items() + client_cert.items())),
]
for url, ssl_context, expected_params in instances:
req.get.reset_mock()
self.kubeutil.tls_settings = ssl_context
self.kubeutil.perform_kubelet_query(url)
req.get.assert_called_with(url, **expected_params)
@mock.patch('utils.kubernetes.kubeutil.requests')
def test_retrieve_json_auth(self, r):
instances = [
# tls_settings, expected_params
(
{},
{'verify': False, 'timeout': 3, 'params': None, 'headers': {'content-type': 'application/json'}, 'cert': None}
), (
{'bearer_token': 'foo_tok'},
{'verify': False, 'timeout': 3, 'params': None, 'headers': {'Authorization': 'Bearer foo_tok', 'content-type': 'application/json'}, 'cert': None}
), (
{'bearer_token': 'foo_tok','apiserver_client_cert': ('foo.crt', 'foo.key')},
{'verify': False, 'timeout': 3, 'params': None, 'headers': {'content-type': 'application/json'}, 'cert': ('foo.crt', 'foo.key')}
),
]
for tls_settings, expected_params in instances:
r.get.reset_mock()
self.kubeutil.tls_settings = tls_settings
self.kubeutil.retrieve_json_auth('url')
r.get.assert_called_once_with('url', **expected_params)
r.get.reset_mock()
self.kubeutil.tls_settings = {'bearer_token': 'foo_tok'}
self.kubeutil.CA_CRT_PATH = __file__
self.kubeutil.retrieve_json_auth('url')
r.get.assert_called_with('url', verify=__file__, timeout=3, params=None, headers={'Authorization': 'Bearer foo_tok', 'content-type': 'application/json'}, cert=None)
def test_get_node_info(self):
with mock.patch('utils.kubernetes.KubeUtil._fetch_host_data') as f:
self.kubeutil._node_ip = None
self.kubeutil._node_name = None
self.kubeutil.get_node_info()
f.assert_called_once()
f.reset_mock()
self.kubeutil._node_ip = 'foo'
self.kubeutil._node_name = 'bar'
ip, name = self.kubeutil.get_node_info()
self.assertEqual(ip, 'foo')
self.assertEqual(name, 'bar')
f.assert_not_called()
def test__fetch_host_data(self):
"""
Test with both 1.1 and 1.2 version payloads
"""
with mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list') as mock_pods:
self.kubeutil.host_name = 'dd-agent-1rxlh'
mock_pods.return_value = json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False))
self.kubeutil._fetch_host_data()
self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
self.assertEqual(self.kubeutil._node_name, 'kubernetes-massi-minion-k23m')
self.kubeutil.host_name = 'heapster-v11-l8sh1'
mock_pods.return_value = json.loads(Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False))
self.kubeutil._fetch_host_data()
self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
self.assertEqual(self.kubeutil._node_name, 'gke-cluster-1-8046fdfa-node-ld35')
def test_get_auth_token(self):
KubeUtil.AUTH_TOKEN_PATH = '/foo/bar'
self.assertIsNone(KubeUtil.get_auth_token({}))
KubeUtil.AUTH_TOKEN_PATH = Fixtures.file('events.json', sdk_dir=FIXTURE_DIR,) # any file could do the trick
self.assertIsNotNone(KubeUtil.get_auth_token({}))
def test_is_k8s(self):
os.unsetenv('KUBERNETES_PORT')
self.assertFalse(detect_is_k8s())
os.environ['KUBERNETES_PORT'] = '999'
self.assertTrue(detect_is_k8s())
def test_extract_event_tags(self):
events = json.loads(Fixtures.read_file("events.json", sdk_dir=FIXTURE_DIR, string_escape=False))['items']
for ev in events:
tags = KubeUtil().extract_event_tags(ev)
# there should be 6 tags except for some events where source.host is missing
self.assertTrue(len(tags) >= 5)
tag_names = [tag.split(':')[0] for tag in tags]
self.assertIn('reason', tag_names)
self.assertIn('namespace', tag_names)
self.assertIn('object_type', tag_names)
self.assertIn('object_name', tag_names)
self.assertIn('source_component', tag_names)
if len(tags) == 6:
self.assertIn('node_name', tag_names)
|
|
""" Wrapper around external SVM variant implementations like LibSVM or LIBLINEAR """
import logging
import warnings
try:
# import matplotlib as mpl
# mpl.rcParams['text.usetex'] = True
# mpl.rcParams['text.latex.unicode'] = True
import matplotlib.pyplot as plt
except ImportError:
pass
import scipy.spatial.distance
from pySPACE.missions.nodes.decorators import NoOptimizationParameter, ChoiceParameter
# import the external libraries
try: # Liblinear
import liblinearutil
except ImportError:
pass
try: # Libsvm
import svmutil
except ImportError:
pass
# representation of the linear classification vector
from pySPACE.resources.data_types.feature_vector import FeatureVector
# the output is a prediction vector
from pySPACE.resources.data_types.prediction_vector import PredictionVector
# array handling
import numpy
# base class
from pySPACE.missions.nodes.classification.base import RegularizedClassifierBase
@NoOptimizationParameter("regression")
class LibSVMClassifierNode(RegularizedClassifierBase):
"""Classify like a Standard SVM with the LibSVM settings.
This node is a wrapper around the *current* libsvm implementation of a SVM.
http://www.csie.ntu.edu.tw/~cjlin/libsvm/oldfiles/
**Parameters**
Some general parameters are only documented in the
:class:`RegularizedClassifierBase <pySPACE.missions.nodes.classification.base.RegularizedClassifierBase>`.
:svm_type:
Defines the used SVM type.
One of the following Strings: 'C-SVC', 'one-class SVM',
'epsilon-SVR', 'nu-SVR'. The last two types are for regression,
the first for classification.
.. warning:: For using "one-class SVM" better use the
:class:`~pySPACE.missions.nodes.classification.one_class.LibsvmOneClassNode`.
(*optional, default: 'C-SVC'*)
:complexity:
Defines parameter for 'C-SVC', 'epsilon-SVR' and 'nu-SVR'.
Complexity sets the weighting of punishment for misclassification
in comparison to generalizing classification from the data.
Equals parameter /cost/ or /C/ in libsvm-package.
Value in the range from 0 to infinity.
(*optional, default: 1*)
:str_label_function:
A String representing a Python eval()-able function,
that transforms the labels (list).
It makes only sense for numeric labels. E.g.
"lambda liste: [exp(-0.0001*elem**2) for elem in liste]".
(*optional, default: None*)
:debug:
If *debug* is True one gets additional output
concerning the classification.
.. note:: This makes only sense for the 'LINEAR'-*kernel_type*.
(*optional, default: False*)
:store:
Parameter of super-class. If *store* is True,
the classification vector is stored as a feature vector.
.. note:: This makes only sense for the 'LINEAR'-*kernel_type*.
(*optional, default: False*)
:max_iterations:
Restricts the solver inside the LibSVM to maximal
use N iterations, where N is the product of *max_iterations*
and the number of samples used to train the classifier.
If omitted or set to zero the
solver takes as much iterations it needs to
calculate the model.
.. note:: This number has to be an integer and
is very important if you expect the classifier
not to converge.
.. note:: To use this feature you will need the modified libsvm
of the external folder in a compiled version.
Furthermore you should make sure,
that this version is imported, e.g. by adding the path
at the beginning of the configuration file paths.
(*optional, default: 0*)
:complexities_path:
If a complexities_path is given, the complexity is read from a
YAML file. This file has a dict with channel numbers as keys and
the corresponding complexity as value. Also, a
'features_per_channel' dict entry can be set to calculate
channel number based on the number of features. If no
'features_per_channel' is given, a factor of 1 is assumed. This
can be used to specify the number of features in the file, instead
of the number of sensor channels. A minimal example for the file
content could be::
{32: 0.081, 62: 0.019, features_per_channel: 6}.
'complexities_path' will overwrite 'complexity'.
(*optional, default: 0*)
**Exemplary Call**
.. code-block:: yaml
-
node : LibSVM_Classifier
parameters :
svm_type : "C-SVC"
complexity : 1
kernel_type : "LINEAR"
class_labels : ['Standard', 'Target']
weight : [1,3]
debug : True
store : True
max_iterations : 100
:input: FeatureVector
:output: PredictionVector
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
& Mario Krell (Mario.krell@dfki.de)
:Created: 2009/07/02
:Revised: 2010/04/09
:Last change: 2011/05/06 Mario Krell old version deleted
"""
def __init__(self, svm_type='C-SVC', max_iterations=0,
str_label_function=None, complexities_path=None, **kwargs):
# set default svm_type 'C-SVC' if unsupported svm_type is selected
supported_types = ["C-SVC", "one-class SVM", "epsilon-SVR", "nu-SVR"]
if svm_type not in supported_types:
svm_type = 'C-SVC'
warnings.warn("SVM-type unknown. C-SVC will be used!")
if svm_type == 'C-SVC':
regression = False
else:
regression = True
super(LibSVMClassifierNode, self).__init__(
regression=regression, **kwargs)
# Check if the svm module has been correctly imported
try:
import svmutil
except ImportError as e:
self._log("svmutil.py could not be imported.")
message = "Using the LibSVMClassifierNode requires " + \
"the Python svm module provided by libsvm. " + \
"For installation hints see documentation " + \
"or http://www.csie.ntu.edu.tw/~cjlin/libsvm/." + \
"Furthermore try to import the path to the " + \
"external folder."
args = e.args
if not args:
e.args = message
else:
e.args = (message,) + args
raise
self.set_permanent_attributes(str_label_function=str_label_function,
svm_type=svm_type,
max_iterations=int(max_iterations),
store_all_samples=True,
predictor_iterations=numpy.Inf)
def _stop_training(self, debug=False):
""" Finish the training, i.e. train the SVM """
self._complete_training(debug)
self.relabel_training_set()
def _complete_training(self, debug=False):
""" Iterate over the complete data to get the initial model """
########## read complexities file if given ##########
if self.complexities_path is not None:
import yaml
complexities_file=open(self.complexities_path, 'r')
complexities = yaml.load(complexities_file)
# nr of channels = nr of features (==dim) / features_per_channel
if not 'features_per_channel' in complexities:
complexities['features_per_channel'] = 1
self.complexity = complexities[
round(self.dim/complexities['features_per_channel'])]
self._log("Read complexity %s from file. Dimension is %s" %
(self.complexity, self.dim), level=logging.INFO)
# not compatible with regression!
# self._log("Instances of Class %s: %s, %s: %s" \
# % (self.classes[0],
# self.labels.count(self.classes.index(self.classes[0])),
# self.classes[1],
# self.labels.count(self.classes.index(self.classes[1]))))
# instead this?:
self._log("Performing training of SVM.")
########## Calculation of default gamma ##########
self.calculate_gamma()
self.num_samples = len(self.samples)
# nr_weight is the number of elements in the array weight_label and
# weight. Each weight[i] corresponds to weight_label[i], meaning that
# the penalty of class weight_label[i] is scaled by a factor of
# weight[i]. If you do not want to change penalty for any of the
# classes, just set nr_weight to 0.
########## preparation of the libsvm command ##########
# for probability output add "-b 1" to options
options = \
"-c %.42f -d %d -g %.42f -r %.42f -n %.42f -p %.42f -e %.20f -m %.42f" % \
(self.complexity, self.exponent, self.gamma,
self.offset, self.nu, self.epsilon, self.tolerance, 1000)
# use 1000MB instead of 100MB (default)
# options += " -b 1" un-comment this for probabilistic output!
if self.multinomial:
options += " -b 1"
for i,w in enumerate(self.weight):
options += " -w%d %.42f" % (i, w)
if self.kernel_type == 'LINEAR':
options += " -t 0"
elif self.kernel_type == 'POLY':
options += " -t 1"
elif self.kernel_type == 'RBF':
options += " -t 2"
elif self.kernel_type == 'SIGMOID':
options += " -t 3"
else:
self.kernel_type = 'LINEAR'
options += " -t 0"
warnings.warn("Kernel unknown! Precomputed Kernels are not " +
"yet implemented. Linear Kernel used.")
# PRECOMPUTED: kernel values in training_set_file
# (not yet implemented)
if self.svm_type == 'C-SVC':
options += " -s 0"
elif self.svm_type == 'nu-SVR':
options += " -s 1"
elif self.svm_type == 'one-class SVM':
options += " -s 2"
elif self.svm_type == 'epsilon-SVR':
options += " -s 3"
else:
options += " -s 0"
self.svm_type = 'C-SVC'
warnings.warn("SVM-type unknown. C-SVC will be used!")
if not self.debug:
options += " -q"
self._log("Libsvm is now quiet!")
old_libsvm_options = options
if self.max_iterations != 0:
options += " -i %d" % self.max_iterations
try:
param = svmutil.svm_parameter(options)
except ValueError:
param = svmutil.svm_parameter(old_libsvm_options)
self._log(
"Using max_iterations is not supported by the standard " +
"LIBSVM. Change your Python path to our customized version!",
level=logging.CRITICAL)
# transform labels with *label_function*
if self.str_label_function is not None:
self.label_function = eval(self.str_label_function)
self.labels = self.label_function(self.labels)
# build the classifier
# h = [map(float,list(data)) for data in self.samples]
problem = svmutil.svm_problem(self.labels, [
map(float, list(data)) for data in self.samples])
model = svmutil.svm_train(problem, param)
if not self.multinomial:
if (self.svm_type == 'C-SVC' or self.svm_type == 'one-class SVM') \
and self.kernel_type == 'LINEAR':
self.calculate_classification_vector(model)
if self.debug:
# This calculation is needed for further analysis
self.calculate_slack_variables(model)
print "LIBSVM Parameter:"
self.print_variables()
else:
# Slack variables are the same no matter which kernel is used
# This method is mainly used to reduce the number of samples
# being stored later on.
if self.debug:
self.calculate_slack_variables(model)
self.model = model
else:
self.model = model
# Slack variables are the same no matter which kernel is used
# This method is mainly used to reduce the number of samples
# being stored later on.
# read number of iterations needed to solve the problem
if self.max_iterations != 0:
try:
predictor_iterations = model.get_num_iterations()
self.classifier_information["~~Solver_Iterations~~"] = \
predictor_iterations
if predictor_iterations == 0 or \
predictor_iterations == numpy.Inf:
self.classifier_information["~~SVM_Converged~~"] = False
else:
self.classifier_information["~~SVM_Converged~~"] = True
except:
warnings.warn("Could not read state of the LibSVM Solver " +
"from the C-Library!")
try:
self.classifier_information["~~offset~~"] = self.b
self.classifier_information["~~w0~~"] = self.w[0]
self.classifier_information["~~w1~~"] = self.w[1]
except:
pass
self.delete_training_data()
def _execute(self, x):
""" Executes the classifier on the given data vector x.
prediction value = <w,data>+b in the linear case."""
data = x.view(numpy.ndarray)
if self.svm_type == 'C-SVC':
if self.kernel_type == 'LINEAR' and not self.multinomial:
return super(LibSVMClassifierNode, self)._execute(x)
else:
# for probability output add "-b 1" as 4th parameter
if self.multinomial:
try:
p_labs, p_acc, p_vals = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model,
'-q -b 1')
except ValueError: # Wrong options
p_labs, p_acc, p_vals = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model, '-b 1')
else:
try:
prediction_value = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model,
'-q')[2][0][0]
except ValueError: # Wrong options
prediction_value = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model)[2][0][0]
except IndexError:
warnings.warn("Probably your classification failed!")
prediction_value = 0
# The new version has only one output of the score.
# The ordering can be obtained by model.labels and if it is
# not [1,0] we have to change the sign of the score to be
# comparable with the old libsvm AND to do the right mapping
# back to the binary labels
if self.model.get_labels() == [0, 1]:
prediction_value = -prediction_value
# Look up class label
# prediction_value --> {-1,1} --> {0,1} --> Labels
if self.multinomial:
prediction = self.classes[int(p_labs[0])]
prediction_value = p_vals[0][int(p_labs[0])]
else:
if prediction_value > 0:
prediction = self.classes[1]
else:
prediction = self.classes[0]
prediction_vector = PredictionVector(label=prediction,
prediction=prediction_value,
predictor=self)
return prediction_vector
elif self.svm_type == 'one-class SVM': # one-class! TODO: Extra Node?
# for probability output add "-b 1" as 4th parameter
# get prediction as mentioned above
if not self.kernel_type == "LINEAR" and not self.multinomial:
try:
prediction = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model, "-q")
except ValueError:
prediction = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model)
prediction_value = prediction[2][0][0]
if prediction_value >= 0:
label = self.classes[0]
else:
label = self.classes[1]
return PredictionVector(prediction=prediction_value,
predictor=self,
label=label)
else:
result = super(LibSVMClassifierNode, self)._execute(x)
# invert label
result.label = self.classes[1-self.classes.index(result.label)]
return result
else: # regression! TODO: Extra Node?
# for probability output add "-b 1" as 4th parameter
try:
prediction_value = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model, "-q")
except ValueError:
prediction_value = svmutil.svm_predict([0], [
map(float, list(data[0, :]))], self.model)
prediction_value = prediction_value[2][0][0]
return PredictionVector(prediction=prediction_value,
predictor=self)
def save_model(self, filename):
svmutil.svm_save_model(filename, self.model)
def load_model(self, filename):
print 'load model'
self.model = svmutil.svm_load_model(filename)
def calculate_slack_variables(self, model):
"""This method calculates from the given SVM model
the related slack variables for classification."""
self.t = []
self.num_sv = 0
self.num_nsv = 0
self.inner_margin = 0
self.ti = []
dropped_samples = []
dropped_labels = []
for i in range(self.num_samples):
# ctype libsvm bindings
try:
p = svmutil.svm_predict([0], [
map(float, list(self.samples[i-self.num_nsv]))],
model, "-q")[2][0][0]
except ValueError:
p = svmutil.svm_predict([0], [
map(float, list(self.samples[i-self.num_nsv]))],
model)[2][0][0]
except IndexError:
self._log("Classification failed. " +
"Did you specify the parameters correctly?",
level=logging.ERROR)
p = 0
if model.get_labels() == [0,1]:
p = -p
p *= 2 * (self.labels[i - self.num_nsv] - 0.5)
if p > 1:
self.t.append(0)
self.ti.append(0)
dropped_samples.append(self.samples.pop(i - self.num_nsv))
dropped_labels.append(self.labels.pop(i - self.num_nsv))
self.num_nsv += 1
else:
self.t.append(1-p)
self.num_sv += 1
if 1-p<1e-5:
p = 1
self.ti.append(0)
else:
self.ti.append(1-p)
self.inner_margin +=1
# if self.store_all_samples:
for i in range(len(dropped_samples)):
self.samples.append(dropped_samples[i])
self.labels.append(dropped_labels[i])
del(dropped_samples)
del(dropped_labels)
def calculate_classification_vector(self, model):
""" Calculate classification vector w and the offset b """
# ctypes libsvm bindings
# TODO get parameter maybe easier
try:
self.b = svmutil.svm_predict([0], [[0.0]*self.dim], model,
"-q")[2][0][0]
except ValueError:
self.b = svmutil.svm_predict([0], [[0.0]*self.dim], model)[2][0][0]
except IndexError:
self._log("Classification failed. " +
"Did you specify the parameters correctly?",
level=logging.ERROR)
self.b = 0
self.w = numpy.zeros(self.dim)
self.features = FeatureVector(numpy.atleast_2d(self.w).astype(
numpy.float64), self.feature_names)
if model.get_labels() == [0, 1]:
self.b = -self.b
self.w = numpy.zeros(self.dim)
for i in range(self.dim):
e = [0.0] * self.dim
e[i] = 1.0
try:
self.w[i] = svmutil.svm_predict([0],[e], model, "-q")[2][0][0]
except ValueError:
try:
self.w[i] = svmutil.svm_predict([0],[e], model)[2][0][0]
except IndexError:
pass
except IndexError:
pass
if model.get_labels() == [0,1]:
self.w[i] = -self.w[i]
self.w[i] -= self.b
self.features = FeatureVector(numpy.atleast_2d(self.w).astype(
numpy.float64), self.feature_names)
try:
wf = []
for i,feature in enumerate(self.feature_names):
if not self.w[i] == 0:
wf.append((self.w[i],feature))
wf.sort()
w = numpy.array(wf, dtype='|S200')
except ValueError:
self._log('w could not be converted.', level=logging.WARNING)
except IndexError:
self._log('There are more feature names than features. \
Please check your feature generation and input data.',
level=logging.CRITICAL)
self.b = 0
w = numpy.zeros(self.dim)
self.w = w
# only features without zero multiplier are relevant
self.num_retained_features = len(w)
self.classifier_information["~~Num_Retained_Features~~"] = \
self.num_retained_features
self.print_w = w
def remove_no_border_points(self, retraining_required):
""" Discard method to remove all samples from the training set that are
not in the border of their class.
The border is determined by a minimum distance from the center of
the class and a maximum distance.
:param retraining_required: flag if retraining is
requiered (the new point is a potential SV or a removed
one was a sv)
"""
# get centers of each class
targetSamples = [s for (s, l) in zip(self.samples, self.labels)\
if l == 1] # self.classes.index("Target")]
standardSamples = [s for (s, l) in zip(self.samples, self.labels)\
if l == 0] # self.classes.index("Standard")]
if self.training_set_ratio == "KEEP_RATIO_AS_IT_IS":
num_target = len(targetSamples)
num_standard = len(standardSamples)
num_target = 1.0 * num_target / (num_target + num_standard) * \
self.basket_size
num_standard = self.basket_size - num_target
# mean vector of each class (its center)
mTarget = numpy.mean(targetSamples, axis=0)
mStandard = numpy.mean(standardSamples, axis=0)
# euclidean distance between the class centers
R = scipy.spatial.distance.euclidean(mTarget, mStandard)
if self.show_plot:
dim = numpy.shape(self.samples)[1]
if dim == 2:
self.plot_class_borders(
mStandard, mTarget, R, self.scale_factor_small,
self.scale_factor_tall)
# get distance of each point to its class center
distances = []
for i, (s, l) in enumerate(zip(self.samples, self.labels)):
if l==self.classes.index("Target"):
r_1 = scipy.spatial.distance.euclidean(s,mTarget)
r_2 = scipy.spatial.distance.euclidean(s,mStandard)
distances.append([i, s, l, r_1, r_2/(r_1 + r_2)])
else:
r_1 = scipy.spatial.distance.euclidean(s,mStandard)
r_2 = scipy.spatial.distance.euclidean(s,mTarget)
distances.append([i, s, l, r_1, r_2/(r_1 + r_2)])
if self.border_handling == "USE_ONLY_BORDER_POINTS":
# remove all points that are not in the border (in a specific
# radius) around the center
# does not guarantee that demanded number of samples are
# contained in the new training set
distances = filter(lambda x: (
self.scale_factor_small*R < x[3] < self.scale_factor_tall*R),
distances)
# sort according to weight
distances.sort(key=lambda x: x[5])
# pay attention to the basket size
distances = distances[:self.basket_size]
elif self.border_handling == "USE_DIFFERENCE":
# take that point that differ most
# sort by distance, then sort by weight
distances.sort(key=lambda x: (abs(x[3] -
((self.scale_factor_tall -
self.scale_factor_small)/2.0)*R) *
(x[0] != len(self.samples)), x[4]))
if self.border_handling == "USE_ONLY_BORDER_POINTS":
# pay attention to the basket size
distances = distances[:self.basket_size]
elif self.training_set_ratio == "KEEP_RATIO_AS_IT_IS":
distances_tmp = []
for d in distances:
if d[2] == 1 and num_target > 0:
num_target -= 1
distances_tmp.append(d)
elif d[2] == 0 and num_standard > 0:
num_standard -= 1
distances_tmp.append(d)
distances = distances_tmp
elif self.training_set_ratio == "BALANCED_RATIO":
distances_tmp = []
num_target = 0
num_standard = 0
for d in distances:
if d[2] == 1 and num_target < (self.basket_size/2):
num_target += 1
distances_tmp.append(d)
elif d[2] == 0 and num_standard < (self.basket_size/2):
num_standard += 1
distances_tmp.append(d)
distances = distances_tmp
else:
# pay attention to the basket size
distances = distances[:self.basket_size]
[idxs, _, _, _, _] = zip(*distances)
retraining_required = self.remove_samples(list(
set(numpy.arange(self.num_samples)) - set(idxs))) \
or retraining_required
return retraining_required
def add_new_sample(self, data, class_label=None, default=False):
""" Add a new sample to the training set.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
:param default: Specifies if the sample is added to the current
training set or to a future training set
:param default: bool
"""
# use a separate knowledge base when old samples will be totally removed
if (self.discard_type == "CDT" or self.discard_type == "INC_BATCH")\
and default is False:
self.future_samples.append(data)
self.future_labels.append(class_label)
# the sample size for the new knowledge base
# is limited to basket size, so pop oldest
while len(self.future_samples) > self.basket_size:
self.future_samples.pop(0)
self.future_labels.pop(0)
else:
# add new data
self._train_sample(data, class_label)
self.num_samples += 1
def remove_samples(self, idxs):
""" Remove the samples at the given indices from the training set.
:param: idxs: Indices of the samples to remove.
:type: idxs: list of int
:rtype: bool - True if a support vector was removed.
"""
idxs.sort(reverse=True)
for idx in idxs:
self.samples.pop(idx)
self.labels.pop(idx)
if self.add_type == "UNSUPERVISED_PROB":
self.decisions.pop(idx)
self.num_samples -= 1
return True
def visualize(self):
""" Show the training samples, SVS and the current decision function
"""
dim = numpy.shape(self.samples)[1]
if dim == 2:
ax = plt.gca()
ax.set_xlabel(r'$x_0$')
ax.set_ylabel(r'$x_1$')
self.plot_samples()
self.plot_hyperplane()
elif dim == 3:
ax = plt.gca(projection='3d')
ax.set_xlabel(r'$x_0$')
ax.set_ylabel(r'$x_1$')
ax.set_zlabel(r'$x_2$')
self.plot_samples_3D()
self.plot_hyperplane_3D()
if dim == 2 or dim == 3:
plt.draw()
if self.save_plot is True:
imagename = "%s/tmp%010d.png"\
% (self.plot_storage, self.m_counter_i)
self.m_counter_i += 1
plt.savefig(imagename)
@NoOptimizationParameter("use_list")
@ChoiceParameter("svm_type", choices=[0, 1, 2, 3, 4, 5, 6, 7])
class LiblinearClassifierNode(LibSVMClassifierNode):
""" Code Integration of external linear SVM classifier program
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
LIBLINEAR was implemented by the LIBSVM programmers.
It is important to mention, that here (partially) the same modified SVM
model is used as in the SOR variant.
(:mod:`pySPACE.missions.nodes.classification.svm_variants.SOR`)
**Parameters**
Some general parameters are only documented in the
:class:`RegularizedClassifierBase <pySPACE.missions.nodes.classification.base.RegularizedClassifierBase>`.
:svm_type:
:0: L2-regularized logistic regression (primal)
:1: L2-regularized L2-loss support vector classification (dual)
:2: L2-regularized L2-loss support vector classification (primal)
:3: L2-regularized L1-loss support vector classification (dual)
:4: multi-class support vector classification by Crammer and Singer
:5: L1-regularized L2-loss support vector classification
:6: L1-regularized logistic regression
:7: L2-regularized logistic regression (dual)
Type 3 is the standard SVM with
b used in the target function as component of w (offset = True)
or b set to zero.
(*optional, default:3*)
:tolerance:
Tolerance of termination criterion, same default as in libsvm.
.. todo:: Same variable name in upper class for epsilon-SVR
instead of tolerance.
(*optional, default: 0.001*)
:offset:
If True, x is internally replaced by (x,1)
to get an artificial offset b.
Probably in this case b is regularized.
Otherwise the offset b in the classifier function (w^Tx+b)
is set to zero.
(*optional, default: True*)
:store:
Parameter of super-class. If *store* is True,
the classification vector is stored as a feature vector.
(*optional, default: False*)
**Exemplary Call**
.. code-block:: yaml
-
node : lSVM
parameters :
class_labels : ["Target", "Standard"]
:Author: Mario Michael Krell (mario.krell@dfki.de)
:Created: 2012/01/19
"""
def __init__(self,tolerance=0.001, svm_type=3, offset=True, **kwargs):
if offset:
offset = 1
else:
offset = -1
super(LiblinearClassifierNode,self).__init__(use_list=True, **kwargs)
# svm type is renamed such that C-SVC is still used in the super class
# this is currently especially advantageous in the execute method
self.set_permanent_attributes(
tolerance=tolerance, alg_num=svm_type, offset=offset)
def _train(self, data, class_label):
""" Trains the classifier on the given data
It is assumed that the class_label parameter
contains information about the true class the data belongs to
.. todo:: check in new version of liblinear, if ndarrays are accepted
and the method from libsvm can be used.
"""
self._train_phase_started = True
if self.feature_names is None:
try:
self.feature_names = data.feature_names
except AttributeError as e:
warnings.warn(
"Use a feature generator node before a classification node."
)
raise e
if self.dim is None:
self.dim = data.shape[1]
if self.samples is None:
self.samples = []
if self.labels is None:
self.labels = []
if class_label not in self.classes:
warnings.warn(
"Please give the expected classes to the classifier! " +
"%s unknown. Therefore, define the variable " % class_label +
"'class_labels' in your spec file, " +
"where you use your classifier. " +
"For further information refer to the node documentation.")
self.classes.append(class_label)
self.set_permanent_attributes(classes=self.classes)
# Collect the data
data_array=data.view(numpy.ndarray)
self.samples.append(map(float, list(data_array[0,:])))
self.labels.append(self.classes.index(class_label))
def _stop_training(self, debug=False):
""" Finish the training, i.e. train the SVM """
self._complete_training(debug)
self.relabel_training_set()
def _complete_training(self, debug=False):
""" Forward data to external training and extract classifier information
"""
if self.str_label_function is not None:
self.label_function = eval(self.str_label_function)
self.labels = self.label_function()
options = "-c %.42f -e %.42f -s %d -B %d" % \
(self.complexity, self.tolerance, self.alg_num, self.offset)
for i,w in enumerate(self.weight):
options += " -w%d %.42f" % (i, w)
if not self.debug:
options += " -q"
self._log("Liblinear is now quiet!")
import liblinearutil
param = liblinearutil.parameter(options)
problem = liblinearutil.problem(self.labels, self.samples)
model = liblinearutil.train(problem, param)
self.calculate_classification_vector(model)
if self.debug:
print self.print_w
print self.b
def calculate_classification_vector(self, model):
"""This method calculates from the given SVM model
the related classification vector w and the offset b."""
# ctypes liblinear bindings
if self.offset == 1:
self.b = model.w[self.dim]
else:
self.b = 0
self.w = numpy.zeros(self.dim)
for i in range(self.dim):
self.w[i] = model.w[i]
if model.get_labels() == [0,1]:
self.w = -1*self.w
self.b = -1*self.b
self.features = FeatureVector(numpy.atleast_2d(self.w).astype(
numpy.float64), self.feature_names)
try:
wf=[]
for i,feature in enumerate(self.feature_names):
if not self.w[i] == 0:
wf.append((self.w[i],feature))
wf.sort()
w = numpy.array(wf, dtype='|S20')
except ValueError :
print 'w could not be converted.'
except IndexError :
print 'There are more feature names than features. \
Please check your feature generation and input data.'
self.b = 0
w = numpy.zeros(self.dim)
self.w = w
# only features without zero multiplier are relevant
self.num_retained_features = len(w)
self.classifier_information["~~Num_Retained_Features~~"] =\
self.num_retained_features
self.print_w = w
_NODE_MAPPING = {"LibSVM_Classifier": LibSVMClassifierNode,
"2SVM": LibSVMClassifierNode,
"lSVM": LiblinearClassifierNode,
}
|
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements graph convolutions layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_graphics.geometry.convolution.graph_convolution as gc
from tensorflow_graphics.util import export_api
def feature_steered_convolution_layer(
data,
neighbors,
sizes,
translation_invariant=True,
num_weight_matrices=8,
num_output_channels=None,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.1),
name='graph_convolution_feature_steered_convolution',
var_name=None):
# pyformat: disable
"""Wraps the function `feature_steered_convolution` as a TensorFlow layer.
The shorthands used below are
`V`: The number of vertices.
`C`: The number of channels in the input data.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
data: A `float` tensor with shape `[A1, ..., An, V, C]`.
neighbors: A SparseTensor with the same type as `data` and with shape
`[A1, ..., An, V, V]` representing vertex neighborhoods. The neighborhood
of a vertex defines the support region for convolution. For a mesh, a
common choice for the neighborhood of vertex `i` would be the vertices in
the K-ring of `i` (including `i` itself). Each vertex must have at least
one neighbor. For a faithful implementation of the FeaStNet paper,
neighbors should be a row-normalized weight matrix corresponding to the
graph adjacency matrix with self-edges:
`neighbors[A1, ..., An, i, j] > 0` if vertex `i` and `j` are neighbors,
`neighbors[A1, ..., An, i, i] > 0` for all `i`, and
`sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0` for all `i`.
These requirements are relaxed in this implementation.
sizes: An `int` tensor of shape `[A1, ..., An]` indicating the true input
sizes in case of padding (`sizes=None` indicates no padding).
`sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes` will
be ignored. As an example, consider an input consisting of three graphs
`G0`, `G1`, and `G2` with `V0`, `V1` and `V2` vertices respectively. The
padded input would have the following shapes: `data.shape = [3, V, C]`,
and `neighbors.shape = [3, V, V]`, where `V = max([V0, V1, V2])`. The true
sizes of each graph will be specified by `sizes=[V0, V1, V2]`.
`data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and
neighborhood data of graph `Gi`. The `SparseTensor` `neighbors` should
have no nonzero entries in the padded regions.
translation_invariant: A `bool`. If `True` the assignment of features to
weight matrices will be invariant to translation.
num_weight_matrices: An `int` specifying the number of weight matrices used
in the convolution.
num_output_channels: An optional `int` specifying the number of channels in
the output. If `None` then `num_output_channels = C`.
initializer: An initializer for the trainable variables.
name: A (name_scope) name for this op. Passed through to
feature_steered_convolution().
var_name: A (var_scope) name for the variables. Defaults to
`graph_convolution_feature_steered_convolution_weights`.
Returns:
Tensor with shape `[A1, ..., An, V, num_output_channels]`.
"""
# pyformat: enable
with tf.compat.v1.variable_scope(
var_name,
default_name='graph_convolution_feature_steered_convolution_weights'):
# Skips shape validation to avoid redundancy with
# feature_steered_convolution().
data = tf.convert_to_tensor(value=data)
in_channels = tf.compat.dimension_value(data.shape[-1])
if num_output_channels is None:
out_channels = in_channels
else:
out_channels = num_output_channels
var_u = tf.compat.v1.get_variable(
shape=(in_channels, num_weight_matrices),
dtype=data.dtype,
initializer=initializer,
name='u')
if translation_invariant:
var_v = -var_u
else:
var_v = tf.compat.v1.get_variable(
shape=(in_channels, num_weight_matrices),
dtype=data.dtype,
initializer=initializer,
name='v')
var_c = tf.compat.v1.get_variable(
shape=(num_weight_matrices),
dtype=data.dtype,
initializer=initializer,
name='c')
var_w = tf.compat.v1.get_variable(
shape=(num_weight_matrices, in_channels, out_channels),
dtype=data.dtype,
initializer=initializer,
name='w')
var_b = tf.compat.v1.get_variable(
shape=(out_channels),
dtype=data.dtype,
initializer=initializer,
name='b')
return gc.feature_steered_convolution(
data=data,
neighbors=neighbors,
sizes=sizes,
var_u=var_u,
var_v=var_v,
var_c=var_c,
var_w=var_w,
var_b=var_b,
name=name)
class FeatureSteeredConvolutionKerasLayer(tf.keras.layers.Layer):
"""Wraps the function `feature_steered_convolution` as a Keras layer."""
def __init__(self,
translation_invariant=True,
num_weight_matrices=8,
num_output_channels=None,
initializer=None,
name=None,
**kwargs):
"""Initializes FeatureSteeredConvolutionKerasLayer.
Args:
translation_invariant: A `bool`. If `True` the assignment of features to
weight matrices will be invariant to translation.
num_weight_matrices: An `int` specifying the number of weight matrices
used in the convolution.
num_output_channels: An optional `int` specifying the number of channels
in the output. If `None` then `num_output_channels` will be the same as
the input dimensionality.
initializer: An initializer for the trainable variables. If `None`,
defaults to `tf.keras.initializers.TruncatedNormal(stddev=0.1)`.
name: A name for this layer.
**kwargs: Additional keyword arguments passed to the base layer.
"""
super(FeatureSteeredConvolutionKerasLayer, self).__init__(
name=name, **kwargs)
self._num_weight_matrices = num_weight_matrices
self._num_output_channels = num_output_channels
self._translation_invariant = translation_invariant
if initializer is None:
self._initializer = tf.keras.initializers.TruncatedNormal(stddev=0.1)
else:
self._initializer = initializer
def build(self, input_shape):
"""Initializes the trainable weights."""
in_channels = tf.TensorShape(input_shape[0]).as_list()[-1]
if self._num_output_channels is None:
out_channels = in_channels
else:
out_channels = self._num_output_channels
dtype = self.dtype
num_weight_matrices = self._num_weight_matrices
initializer = self._initializer
self.var_u = self.add_weight(
shape=(in_channels, num_weight_matrices),
dtype=dtype,
initializer=initializer,
name='u')
if self._translation_invariant:
self.var_v = -self.var_u
else:
self.var_v = self.add_weight(
shape=(in_channels, num_weight_matrices),
dtype=dtype,
initializer=initializer,
name='v')
self.var_c = self.add_weight(
shape=(num_weight_matrices,),
dtype=dtype,
initializer=initializer,
name='c')
self.var_w = self.add_weight(
shape=(num_weight_matrices, in_channels, out_channels),
dtype=dtype,
initializer=initializer,
name='w',
trainable=True)
self.var_b = self.add_weight(
shape=(out_channels,),
dtype=dtype,
initializer=initializer,
name='b',
trainable=True)
def call(self, inputs, **kwargs):
# pyformat: disable
"""Executes the convolution.
The shorthands used below are
`V`: The number of vertices.
`C`: The number of channels in the input data.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
inputs: A list of two tensors `[data, neighbors]`. `data` is a `float`
tensor with shape `[A1, ..., An, V, C]`. `neighbors` is a `SparseTensor`
with the same type as `data` and with shape `[A1, ..., An, V, V]`
representing vertex neighborhoods. The neighborhood of a vertex defines
the support region for convolution. For a mesh, a common choice for the
neighborhood of vertex `i` would be the vertices in the K-ring of `i`
(including `i` itself). Each vertex must have at least one neighbor. For
a faithful implementation of the FeaStNet paper, neighbors should be a
row-normalized weight matrix corresponding to the graph adjacency matrix
with self-edges: `neighbors[A1, ..., An, i, j] > 0` if vertex `j` is a
neighbor of vertex `i`, and `neighbors[A1, ..., An, i, i] > 0` for all
`i`, and `sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0` for all `i`.
These requirements are relaxed in this implementation.
**kwargs: A dictionary containing the key `sizes`, which is an `int` tensor
of shape `[A1, ..., An]` indicating the true input sizes in case of
padding (`sizes=None` indicates no padding). `sizes[A1, ..., An] <= V`.
If `data` and `neighbors` are 2-D, `sizes` will be ignored. As an
example usage of `sizes`, consider an input consisting of three graphs
`G0`, `G1`, and `G2` with `V0`, `V1`, and `V2` vertices respectively.
The padded input would have the shapes `data.shape = [3, V, C]`, and
`neighbors.shape = [3, V, V]`, where `V = max([V0, V1, V2])`. The true
sizes of each graph will be specified by `sizes=[V0, V1, V2]`.
`data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and
neighborhood data of graph `Gi`. The `SparseTensor` `neighbors` should
have no nonzero entries in the padded regions.
Returns:
Tensor with shape `[A1, ..., An, V, num_output_channels]`.
"""
# pyformat: enable
sizes = kwargs.get('sizes', None)
return gc.feature_steered_convolution(
data=inputs[0],
neighbors=inputs[1],
sizes=sizes,
var_u=self.var_u,
var_v=self.var_v,
var_c=self.var_c,
var_w=self.var_w,
var_b=self.var_b)
class DynamicGraphConvolutionKerasLayer(tf.keras.layers.Layer):
"""A keras layer for dynamic graph convolutions.
Dynamic GraphCNN for Learning on Point Clouds
Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael Bronstein, and
Justin Solomon.
https://arxiv.org/abs/1801.07829
This layer implements an instance of the graph convolutional operation
described in the paper above, specifically a graph convolution block with a
single edge filtering layer. This implementation is intended to demonstrate
how `graph_convolution.edge_convolution_template` can be wrapped to implement
a variety of edge convolutional methods.
This implementation is slightly generalized version to what is described in
the paper in that here variable sized neighborhoods are allowed rather than
forcing a fixed size k-neighbors. Users must provide the neighborhoods as
input.
"""
def __init__(self,
num_output_channels,
reduction,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name=None,
**kwargs):
"""Initializes DynamicGraphConvolutionKerasLayer.
Args:
num_output_channels: An `int` specifying the number of output channels.
reduction: Either 'weighted' or 'max'. Specifies the reduction over
neighborhood edge features as described in the paper above.
activation: The `activation` argument of `tf.keras.layers.Conv1D`.
use_bias: The `use_bias` argument of `tf.keras.layers.Conv1D`.
kernel_initializer: The `kernel_initializer` argument of
`tf.keras.layers.Conv1D`.
bias_initializer: The `bias_initializer` argument of
`tf.keras.layers.Conv1D`.
kernel_regularizer: The `kernel_regularizer` argument of
`tf.keras.layers.Conv1D`.
bias_regularizer: The `bias_regularizer` argument of
`tf.keras.layers.Conv1D`.
activity_regularizer: The `activity_regularizer` argument of
`tf.keras.layers.Conv1D`.
kernel_constraint: The `kernel_constraint` argument of
`tf.keras.layers.Conv1D`.
bias_constraint: The `bias_constraint` argument of
`tf.keras.layers.Conv1D`.
name: A name for this layer.
**kwargs: Additional keyword arguments passed to the base layer.
"""
super(DynamicGraphConvolutionKerasLayer, self).__init__(name=name, **kwargs)
self._num_output_channels = num_output_channels
self._reduction = reduction
self._activation = activation
self._use_bias = use_bias
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activity_regularizer = activity_regularizer
self._kernel_constraint = kernel_constraint
self._bias_constraint = bias_constraint
def build(self, input_shape): # pylint: disable=unused-argument
"""Initializes the layer weights."""
self._conv1d_layer = tf.keras.layers.Conv1D(
filters=self._num_output_channels,
kernel_size=1,
strides=1,
padding='valid',
activation=self._activation,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
def call(self, inputs, **kwargs):
# pyformat: disable
"""Executes the convolution.
The shorthands used below are
`V`: The number of vertices.
`C`: The number of channels in the input data.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
inputs: A list of two tensors `[data, neighbors]`. `data` is a `float`
tensor with shape `[A1, ..., An, V, C]`. `neighbors` is a `SparseTensor`
with the same type as `data` and with shape `[A1, ..., An, V, V]`
representing vertex neighborhoods. The neighborhood of a vertex defines
the support region for convolution. For a mesh, a common choice for the
neighborhood of vertex `i` would be the vertices in the K-ring of `i`
(including `i` itself). Each vertex must have at least one neighbor. For
`reduction='weighted'`, `neighbors` should be a row-normalized matrix:
`sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0` for all `i`, although
this is not enforced in the implementation in case different neighbor
weighting schemes are desired.
**kwargs: A dictionary containing the key `sizes`, which is an `int`
tensor of shape `[A1, ..., An]` indicating the true input sizes in case
of padding (`sizes=None` indicates no padding).
`sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes`
will be ignored. As an example usage of `sizes`, consider an input
consisting of three graphs `G0`, `G1`, and `G2` with `V0`, `V1`, and
`V2` vertices respectively. The padded input would have the shapes
`data.shape = [3, V, C]`, and `neighbors.shape = [3, V, V]`,
where `V = max([V0, V1, V2])`. The true sizes of each graph will be
specified by `sizes=[V0, V1, V2]`. `data[i, :Vi, :]` and
`neighbors[i, :Vi, :Vi]` will be the vertex and neighborhood data of
graph `Gi`. The `SparseTensor` `neighbors` should have no nonzero
entries in the padded regions.
Returns:
Tensor with shape `[A1, ..., An, V, num_output_channels]`.
"""
# pyformat: enable
def _edge_convolution(vertices, neighbors, conv1d_layer):
r"""The edge filtering op passed to `edge_convolution_template`.
This instance implements the edge function
$$h_{\theta}(x, y) = MLP_{\theta}([x, y - x])$$
Args:
vertices: A 2-D Tensor with shape `[D1, D2]`.
neighbors: A 2-D Tensor with the same shape and type as `vertices`.
conv1d_layer: A callable 1d convolution layer.
Returns:
A 2-D Tensor with shape `[D1, D3]`.
"""
concat_features = tf.concat(
values=[vertices, neighbors - vertices], axis=-1)
concat_features = tf.expand_dims(concat_features, 0)
convolved_features = conv1d_layer(concat_features)
convolved_features = tf.squeeze(input=convolved_features, axis=(0,))
return convolved_features
sizes = kwargs.get('sizes', None)
return gc.edge_convolution_template(
data=inputs[0],
neighbors=inputs[1],
sizes=sizes,
edge_function=_edge_convolution,
reduction=self._reduction,
edge_function_kwargs={'conv1d_layer': self._conv1d_layer})
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
Schema for AMQP management entity types.
Schema validation will validate and transform values, add default values and
check for uniqueness of enties/attributes that are specified to be unique.
A Schema can be loaded/dumped to a json file.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
from qpid_dispatch.management.entity import EntityBase
from qpid_dispatch.management.error import NotImplementedStatus
from ..compat import OrderedDict
from ..compat import PY_STRING_TYPE
from ..compat import PY_TEXT_TYPE
from ..compat import dict_keys
from ..compat import dict_items
try:
from ..dispatch import LogAdapter, LOG_WARNING
logger_available = True
except:
# We need to do this because at compile time the schema is pulled using this code and at that time the
# LogAdapter is not loaded. When running the router, the LogAdapter is available.
logger_available = False
class ValidationError(Exception):
"""Error raised if schema validation fails"""
pass
class Type(object):
"""Base class for schema types.
@ivar name: The type name.
@ivar pytype: The python type for this schema type.
"""
def __init__(self, name, pytype):
"""
@param name: The type name.
@param pytype: The python type for this schema type.
"""
self.name, self.pytype = name, pytype
def validate(self, value):
"""
Convert value to the correct python type.
"""
return self.pytype(value)
def dump(self):
"""
@return: Representation of the type to dump to json. Normally the type name,
EnumType.dump is the exception.
"""
return self.name
def __str__(self):
"""String name of type."""
return str(self.dump())
class BooleanType(Type):
"""A boolean schema type"""
def __init__(self):
super(BooleanType, self).__init__("boolean", bool)
VALUES = {"yes":1, "true":1, "on":1, "no":0, "false":0, "off":0}
def validate(self, value):
"""
@param value: A string such as "yes", "false" etc. is converted appropriately.
Any other type is converted using python's bool()
@return A python bool.
"""
try:
if isinstance(value, (PY_STRING_TYPE, PY_TEXT_TYPE)):
return self.VALUES[value.lower()]
return bool(value)
except:
raise ValidationError("Invalid Boolean value '%r'"%value)
class EnumValue(str):
"""A string that convets to an integer value via int()"""
def __new__(cls, name, value):
s = super(EnumValue, cls).__new__(cls, name)
setattr(s, 'value', value)
return s
def __hash__(self): return super(EnumValue, self).__hash__()
def __int__(self): return self.value
def __long__(self): return self.value
def __eq__(self, x): return str(self) == x or int(self) == x
def __ne__(self, x): return not self == x
def __repr__(self): return "EnumValue('%s', %s)"%(str(self), int(self))
class EnumType(Type):
"""An enumerated type"""
def __init__(self, tags):
"""
@param tags: A list of string values for the enumerated type.
"""
assert isinstance(tags, list)
super(EnumType, self).__init__("enum%s"%([str(t) for t in tags]), int)
self.tags = tags
def validate(self, value):
"""
@param value: May be a string from the set of enum tag strings or anything
that can convert to an int - in which case it must be in the enum range.
@return: An EnumValue.
"""
if value in self.tags:
return EnumValue(value, self.tags.index(value))
else:
try:
i = int(value)
return EnumValue(self.tags[i], i)
except (ValueError, IndexError):
pass
raise ValidationError("Invalid value for %s: %r"%(self.name, value))
def dump(self):
"""
@return: A list of the enum tags.
"""
return self.tags
def __str__(self):
"""String description of enum type."""
return "One of [%s]" % ', '.join([("'%s'" %tag) for tag in self.tags])
class PropertiesType(Type):
"""
A PropertiesType is a restricted map: keys must be AMQP 1.0 Symbol types.
See the "fields" type in:
http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transport-v1.0-os.html#type-fields
"""
def __init__(self):
super(PropertiesType, self).__init__("properties", dict)
def validate(self, value):
if not isinstance(value, dict):
raise ValidationError("Properties must be a map");
for key in value.keys():
if (not isinstance(key, PY_STRING_TYPE)
or any(ord(x) > 127 for x in key)):
raise ValidationError("Property keys must be ASCII encoded")
return value
BUILTIN_TYPES = OrderedDict(
(t.name, t) for t in [Type("string", str),
Type("path", str),
Type("entityId", str),
Type("integer", int),
Type("list", list),
Type("map", dict),
Type("dict", dict),
PropertiesType(),
BooleanType()])
def get_type(rep):
"""
Get a schema type.
@param rep: json representation of the type.
"""
if isinstance(rep, list):
return EnumType(rep)
if rep in BUILTIN_TYPES:
return BUILTIN_TYPES[rep]
raise ValidationError("No such schema type: %s" % rep)
def _dump_dict(items):
"""
Remove all items with None value from a mapping.
@return: Map of non-None items.
"""
return OrderedDict((k, v) for k, v in items if v)
class AttributeType(object):
"""
Definition of an attribute.
@ivar name: Attribute name.
@ivar atype: Attribute L{Type}
@ivar required: True if the attribute is required.
@ivar default: Default value for the attribute or None if no default. Can be a reference.
@ivar value: Fixed value for the attribute. Can be a reference.
@ivar unique: True if the attribute value is unique.
@ivar description: Description of the attribute type.
@ivar defined_in: EntityType in which this attribute is defined.
@ivar create: If true the attribute can be set by CREATE.
@ivar update: If true the attribute can be modified by UPDATE.
@ivar graph: If true the attribute could be graphed by a console.
"""
def __init__(self, name, type=None, defined_in=None, default=None,
required=False, unique=False, hidden=False, deprecated=False, deprecationName=None,
value=None, description="", create=False, update=False, graph=False):
"""
See L{AttributeType} instance variables.
"""
try:
self.name = name
self.type = type
self.defined_in = defined_in
self.atype = get_type(self.type)
self.required = required
self.hidden = hidden
self.deprecated = deprecated
self.default = default
self.deprecation_name = deprecationName
self.value = value
self.unique = unique
self.description = description
if self.value is not None and self.default is not None:
raise ValidationError("Attribute '%s' has default value and fixed value" %
self.name)
self.create=create
self.update=update
self.graph=graph
except Exception:
raise ValidationError("Attribute '%s': %s\n%s"
% (name,
sys.exc_info()[1],
sys.exc_info()[2]))
def missing_value(self):
"""
Fill in missing default and fixed values.
"""
if self.value is not None: # Fixed value attribute
return self.value
if self.default is not None:
return self.default
if self.required:
raise ValidationError("Missing required attribute '%s'" % (self.name))
def validate(self, value):
"""
Validate value for this attribute definition.
@param value: The value to validate.
@return: value converted to the correct python type. Rais exception if any check fails.
"""
if self.value and value != self.value:
raise ValidationError("Attribute '%s' has fixed value '%s' but given '%s'"%(
self.name, self.value, value))
try:
return self.atype.validate(value)
except (TypeError, ValueError) as e:
raise ValidationError("%s:%s" % (str(e), sys.exc_info()[2]))
def dump(self):
"""
@return: Json-friendly representation of an attribute type
"""
return _dump_dict([
('type', self.atype.dump()),
('default', self.default),
('required', self.required),
('unique', self.unique),
('deprecated', self.deprecated),
('description', self.description),
('graph', self.graph)
])
def __str__(self):
return self.name
class MessageDef(object):
"""A request or response message"""
def __init__(self, body=None, properties=None):
self.body = None
if body: self.body = AttributeType("body", **body)
self.properties = dict((name, AttributeType(name, **value))
for name, value in (properties or {}).items())
class OperationDef(object):
"""An operation definition"""
def __init__(self, name, description=None, request=None, response=None):
try:
self.name = name
self.description = description
self.request = self.response = None
if request: self.request = MessageDef(**request)
if response: self.response = MessageDef(**response)
except Exception as exc:
raise ValidationError("Operation '%s': %s\n%s"
% (name, str(exc), sys.exc_info()[2]))
class EntityType(object):
"""
An entity type defines a set of attributes for an entity.
@ivar name: Fully qualified entity type name.
@ivar short_name: Un-prefixed short name.
@ivar attributes: Map of L{AttributeType} for entity.
@ivar singleton: If true only one entity of this type is allowed.
@ivar referential: True if an entity can be referred to by name from another entity.
"""
def __init__(self, name, schema, attributes=None, operations=None, operationDefs=None,
description="", fullName=True, singleton=False, deprecated=False,
extends=None, referential=False):
"""
@param name: name of the entity type.
@param schema: schema for this type.
@param singleton: True if entity type is a singleton.
@param attributes: Map of attributes {name: {type:, default:, required:, unique:}}
@param description: Human readable description.
@param operations: Allowed operations, list of operation names.
"""
try:
self.schema = schema
self.description = description
if fullName:
self.name = schema.long_name(name)
self.short_name = schema.short_name(name)
if self.short_name.startswith("router.config."):
self.short_name = self.short_name.replace("router.config.", "")
else:
self.name = self.short_name = name
self.attributes = OrderedDict((k, AttributeType(k, defined_in=self, **v))
for k, v in (attributes or {}).items())
self.deprecated_attributes = OrderedDict()
for key, value in self.attributes.items():
if value.deprecation_name or value.deprecated:
attr_type = AttributeType(value.deprecation_name or key,
type=value.type,
defined_in=self,
default=value.default,
required=value.required,
unique=value.unique,
hidden=value.hidden,
deprecated=True,
deprecationName=None,
value=value.value,
description="(DEPRECATED) " + value.description,
create=value.create,
update=value.update,
graph=value.graph)
if value.deprecation_name:
self.deprecated_attributes[value.deprecation_name] = attr_type
else:
self.deprecated_attributes[key] = attr_type
self.operations = operations or []
# Bases are resolved in self.init()
self.base = extends
self.all_bases = []
self.references = []
self.singleton = singleton
self.deprecated = deprecated
self.referential = referential
self._init = False # Have not yet initialized from base and attributes.
# Operation definitions
self.operation_defs = dict((name, OperationDef(name, **op))
for name, op in (operationDefs or {}).items())
except Exception as exc:
raise ValidationError("%s '%s': %s\n%s" % (type(self).__name__,
name,
exc,
sys.exc_info()[2]))
def init(self):
"""Find bases after all types are loaded."""
if self._init: return
self._init = True
if self.base:
self.base = self.schema.entity_type(self.base)
self.base.init()
self.all_bases = [self.base] + self.base.all_bases
self._extend(self.base, 'extend')
def _extend(self, other, how):
"""Add attributes and operations from other"""
def check(a, b, what):
overlap = set(a) & set(b)
if overlap:
raise ValidationError("'%s' cannot %s '%s', re-defines %s: %s"
% (self.name, how, other.short_name, what, ",".join(overlap)))
check(self.operations, other.operations, "operations")
self.operations += other.operations
check(dict_keys(self.attributes), other.attributes.values(), "attributes")
self.attributes.update(other.attributes)
if other.name == 'entity':
# Fill in entity "type" attribute automatically.
self.attributes["type"]["value"] = self.name
def extends(self, base): return base in self.all_bases
def is_a(self, type): return type == self or self.extends(type)
def attribute(self, name):
"""Get the AttributeType for name"""
if not name in self.attributes and not name in dict_keys(self.deprecated_attributes):
raise ValidationError("Unknown attribute '%s' for '%s'" % (name, self))
if self.attributes.get(name):
return self.attributes[name]
if self.deprecated_attributes.get(name):
return self.deprecated_attributes[name]
return None
def log(self, level, text):
self.schema.log(level, text)
@property
def my_attributes(self):
"""Return only attribute types defined in this entity type"""
return [a for a in self.attributes.values() if a.defined_in == self]
def validate(self, attributes):
"""
Validate attributes for entity type.
@param attributes: Map attributes name:value or Entity with attributes property.
Modifies attributes: adds defaults, converts values.
"""
if isinstance(attributes, SchemaEntity): attributes = attributes.attributes
try:
# Add missing values
for attr in self.attributes.values():
if attributes.get(attr.name) is None:
value = None
deprecation_name = attr.deprecation_name
if deprecation_name:
value = attributes.get(deprecation_name)
if value is not None:
if logger_available:
self.log(LOG_WARNING, "Attribute '%s' of entity '%s' has been deprecated."
" Use '%s' instead"%(deprecation_name, self.short_name, attr.name))
del attributes[deprecation_name]
if value is None:
value = attr.missing_value()
if value is not None:
attributes[attr.name] = value
if value is None and attr.name in attributes:
del attributes[attr.name]
else:
deprecation_name = attr.deprecation_name
if deprecation_name:
value = attributes.get(deprecation_name)
if not value is None:
# Both name and deprecation name have values
# For example, both dir and direction of linkRoute have been specified, This is
# illegal. Just fail.
raise ValidationError("Both '%s' and '%s' cannot be specified for entity '%s'" %
(deprecation_name, attr.name, self.short_name))
# Validate attributes.
for name, value in dict_items(attributes):
if name == 'type':
value = self.schema.long_name(value)
attributes[name] = self.attribute(name).validate(value)
except ValidationError as e:
raise ValidationError("%s: %s" % (self, e))
return attributes
def allowed(self, op, body):
"""Raise exception if op is not a valid operation on entity."""
op = op.upper()
if not op in self.operations:
raise NotImplementedStatus("Operation '%s' not implemented for '%s' %s" % (
op, self.name, self.operations))
def create_check(self, attributes):
for a in attributes:
if not self.attribute(a).create:
raise ValidationError("Cannot set attribute '%s' in CREATE" % a)
def update_check(self, new_attributes, old_attributes):
for a, v in new_attributes.items():
# Its not an error to include an attribute in UPDATE if the value is not changed.
if not self.attribute(a).update and \
not (a in old_attributes and old_attributes[a] == v):
raise ValidationError("Cannot update attribute '%s' in UPDATE" % a)
def dump(self):
"""Json friendly representation"""
return _dump_dict([
('attributes', OrderedDict(
(k, v.dump()) for k, v in self.attributes.items()
if k != 'type')), # Don't dump 'type' attribute, dumped separately.
('operations', self.operations),
('description', self.description or None),
('fullyQualifiedType', self.name or None),
('references', self.references),
('deprecated', self.deprecated),
('singleton', self.singleton)
])
def __repr__(self): return "%s(%s)" % (type(self).__name__, self.name)
def __str__(self): return self.name
def name_is(self, name):
return self.name == self.schema.long_name(name)
class Schema(object):
"""
Schema defining entity types.
Note: keyword arguments come from schema so use camelCase
@ivar prefix: Prefix to prepend to short entity type names.
@ivar entityTypes: Map of L{EntityType} by name.
@ivar description: Text description of schema.
"""
def __init__(self, prefix="", entityTypes=None, description=""):
"""
@param prefix: Prefix for entity names.
@param entity_types: Map of { entityTypeName: { singleton:, attributes:{...}}}
@param description: Human readable description.
"""
if logger_available:
self.log_adapter = LogAdapter("AGENT")
else:
self.log_adapter = None
if prefix:
self.prefix = prefix.strip('.')
self.prefixdot = self.prefix + '.'
else:
self.prefix = self.prefixdot = ""
self.description = description
def parsedefs(cls, defs):
return OrderedDict((self.long_name(k), cls(k, self, **v))
for k, v in (defs or {}).items())
self.entity_types = parsedefs(EntityType, entityTypes)
self.all_attributes = set()
for e in self.entity_types.values():
e.init()
self.all_attributes.update(dict_keys(e.attributes))
def log(self, level, text):
if not self.log_adapter:
return
info = traceback.extract_stack(limit=2)[0] # Caller frame info
self.log_adapter.log(level, text, info[0], info[1])
def short_name(self, name):
"""Remove prefix from name if present"""
if not name: return name
if name.startswith(self.prefixdot):
name = name[len(self.prefixdot):]
return name
def long_name(self, name):
"""Add prefix to unqualified name"""
if not name: return name
if not name.startswith(self.prefixdot):
name = self.prefixdot + name
return name
def dump(self):
"""Return json-friendly representation"""
return OrderedDict([
('prefix', self.prefix),
('entityTypes',
OrderedDict((e.short_name, e.dump()) for e in self.entity_types.values()))
])
def _lookup(self, map, name, message, error):
found = map.get(name) or map.get(self.long_name(name))
if not found and error:
raise ValidationError(message % name)
return found
def entity_type(self, name, error=True):
return self._lookup(self.entity_types, name, "No such entity type '%s'", error)
def validate_entity(self, attributes):
"""
Validate a single entity.
@param attributes: Map of attribute name: value
"""
attributes['type'] = self.long_name(attributes['type'])
entity_type = self.entity_type(attributes['type'])
entity_type.validate(attributes)
def validate_all(self, attribute_maps):
"""
Validate all the entities from entity_iter, return a list of valid entities.
"""
entities = []
for a in attribute_maps:
self.validate_add(a, entities)
entities.append(a);
def validate_add(self, attributes, entities):
"""
Validate that attributes would be valid when added to entities.
Assumes entities are already valid
@raise ValidationError if adding e violates a global constraint like uniqueness.
"""
self.validate_entity(attributes)
entity_type = self.entity_type(attributes['type'])
# Find all the unique attribute types present in attributes
unique = [a for a in entity_type.attributes.values() if a.unique and a.name in attributes]
if not unique and not entity_type.singleton:
return # Nothing to do
for e in entities:
if entity_type.singleton and attributes['type'] == e['type']:
raise ValidationError("Adding %s singleton %s when %s already exists" %
(attributes['type'], attributes, e))
for a in unique:
try:
if entity_type.attributes[a.name] == a and attributes[a.name] == e[a.name]:
raise ValidationError(
"adding %s duplicates unique attribute '%s' from existing %s"%
(attributes, a.name, e))
except KeyError:
continue # Missing attribute or definition means no clash
def entity(self, attributes):
"""Convert an attribute map into an L{SchemaEntity}"""
attributes = dict((k, v) for k, v in attributes.items() if v is not None)
return SchemaEntity(self.entity_type(attributes['type']), attributes)
def entities(self, attribute_maps):
"""Convert a list of attribute maps into a list of L{SchemaEntity}"""
return [self.entity(m) for m in attribute_maps]
def filter(self, predicate):
"""Return an iterator over entity types that satisfy predicate."""
if predicate is None: return self.entity_types.values()
return (t for t in self.entity_types.values() if predicate(t))
def by_type(self, type):
"""Return an iterator over entity types that extend or are type.
If type is None return all entities."""
if not type:
return self.entity_types.values()
else:
return self.filter(lambda t: t.is_a(type))
class SchemaEntity(EntityBase):
"""A map of attributes associated with an L{EntityType}"""
def __init__(self, entity_type, attributes=None, validate=True, **kwattrs):
super(SchemaEntity, self).__init__(attributes, **kwattrs)
self.__dict__['entity_type'] = entity_type
self.attributes.setdefault('type', entity_type.name)
if validate: self.validate()
def _set(self, name, value):
super(SchemaEntity, self)._set(name, value)
self.validate()
def validate(self):
self.entity_type.validate(self.attributes)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from string import Template
import dto_gen
import util
jvpp_facade_callback_template = Template("""
package $base_package.$future_package;
/**
* <p>Async facade callback setting values to future objects
* <br>It was generated by jvpp_future_facade_gen.py based on $inputfile
* <br>(python representation of vpe.api generated by vppapigen).
*/
public final class FutureJVppFacadeCallback implements $base_package.$callback_package.JVppGlobalCallback {
private final java.util.Map<java.lang.Integer, java.util.concurrent.CompletableFuture<? extends $base_package.$dto_package.JVppReply<?>>> requests;
private final $base_package.$notification_package.GlobalNotificationCallback notificationCallback;
public FutureJVppFacadeCallback(final java.util.Map<java.lang.Integer, java.util.concurrent.CompletableFuture<? extends $base_package.$dto_package.JVppReply<?>>> requestMap,
final $base_package.$notification_package.GlobalNotificationCallback notificationCallback) {
this.requests = requestMap;
this.notificationCallback = notificationCallback;
}
@Override
@SuppressWarnings("unchecked")
public void onError(org.openvpp.jvpp.VppCallbackException reply) {
final java.util.concurrent.CompletableFuture<$base_package.$dto_package.JVppReply<?>> completableFuture;
synchronized(requests) {
completableFuture = (java.util.concurrent.CompletableFuture<$base_package.$dto_package.JVppReply<?>>) requests.get(reply.getCtxId());
}
if(completableFuture != null) {
completableFuture.completeExceptionally(reply);
synchronized(requests) {
requests.remove(reply.getCtxId());
}
}
}
$methods
}
""")
jvpp_facade_callback_method_template = Template("""
@Override
@SuppressWarnings("unchecked")
public void on$callback_dto($base_package.$dto_package.$callback_dto reply) {
final java.util.concurrent.CompletableFuture<$base_package.$dto_package.JVppReply<?>> completableFuture;
synchronized(requests) {
completableFuture = (java.util.concurrent.CompletableFuture<$base_package.$dto_package.JVppReply<?>>) requests.get(reply.context);
}
if(completableFuture != null) {
completableFuture.complete(reply);
synchronized(requests) {
requests.remove(reply.context);
}
}
}
""")
jvpp_facade_callback_notification_method_template = Template("""
@Override
public void on$callback_dto($base_package.$dto_package.$callback_dto notification) {
notificationCallback.on$callback_dto(notification);
}
""")
# TODO reuse common parts with generic method callback
jvpp_facade_control_ping_method_template = Template("""
@Override
@SuppressWarnings("unchecked")
public void on$callback_dto($base_package.$dto_package.$callback_dto reply) {
final java.util.concurrent.CompletableFuture<$base_package.$dto_package.JVppReply<?>> completableFuture;
synchronized(requests) {
completableFuture = (java.util.concurrent.CompletableFuture<$base_package.$dto_package.JVppReply<?>>) requests.get(reply.context);
}
if(completableFuture != null) {
// Finish dump call
if (completableFuture instanceof $base_package.$future_package.FutureJVppFacade.CompletableDumpFuture) {
completableFuture.complete((($base_package.$future_package.FutureJVppFacade.CompletableDumpFuture) completableFuture).getReplyDump());
// Remove future mapped to dump call context id
synchronized(requests) {
requests.remove((($base_package.$future_package.FutureJVppFacade.CompletableDumpFuture) completableFuture).getContextId());
}
} else {
completableFuture.complete(reply);
}
synchronized(requests) {
requests.remove(reply.context);
}
}
}
""")
jvpp_facade_details_callback_method_template = Template("""
@Override
@SuppressWarnings("unchecked")
public void on$callback_dto($base_package.$dto_package.$callback_dto reply) {
final FutureJVppFacade.CompletableDumpFuture<$base_package.$dto_package.$callback_dto_reply_dump> completableFuture;
synchronized(requests) {
completableFuture = ($base_package.$future_package.FutureJVppFacade.CompletableDumpFuture<$base_package.$dto_package.$callback_dto_reply_dump>) requests.get(reply.context);
}
if(completableFuture != null) {
$base_package.$dto_package.$callback_dto_reply_dump replyDump = completableFuture.getReplyDump();
if(replyDump == null) {
replyDump = new $base_package.$dto_package.$callback_dto_reply_dump();
completableFuture.setReplyDump(replyDump);
}
replyDump.$callback_dto_field.add(reply);
}
}
""")
def generate_jvpp(func_list, base_package, dto_package, callback_package, notification_package, future_facade_package, inputfile):
""" Generates JVpp interface and JNI implementation """
print "Generating JVpp future facade"
if not os.path.exists(future_facade_package):
raise Exception("%s folder is missing" % future_facade_package)
methods = []
methods_impl = []
callbacks = []
for func in func_list:
if util.is_ignored(func['name']):
continue
camel_case_name_with_suffix = util.underscore_to_camelcase_upper(func['name'])
if not util.is_reply(camel_case_name_with_suffix) and not util.is_notification(func['name']):
continue
camel_case_method_name = util.underscore_to_camelcase(func['name'])
if not util.is_notification(func["name"]):
camel_case_request_method_name = util.remove_reply_suffix(util.underscore_to_camelcase(func['name']))
if util.is_details(camel_case_name_with_suffix):
camel_case_reply_name = get_standard_dump_reply_name(util.underscore_to_camelcase_upper(func['name']),
func['name'])
callbacks.append(jvpp_facade_details_callback_method_template.substitute(base_package=base_package,
dto_package=dto_package,
callback_dto=camel_case_name_with_suffix,
callback_dto_field=camel_case_method_name,
callback_dto_reply_dump=camel_case_reply_name + dto_gen.dump_dto_suffix,
future_package=future_facade_package))
methods.append(future_jvpp_method_template.substitute(base_package=base_package,
dto_package=dto_package,
method_name=camel_case_request_method_name +
util.underscore_to_camelcase_upper(util.dump_suffix),
reply_name=camel_case_reply_name + dto_gen.dump_dto_suffix,
request_name=util.remove_reply_suffix(camel_case_reply_name) +
util.underscore_to_camelcase_upper(util.dump_suffix)))
methods_impl.append(future_jvpp_method_impl_template.substitute(base_package=base_package,
dto_package=dto_package,
method_name=camel_case_request_method_name +
util.underscore_to_camelcase_upper(util.dump_suffix),
reply_name=camel_case_reply_name + dto_gen.dump_dto_suffix,
request_name=util.remove_reply_suffix(camel_case_reply_name) +
util.underscore_to_camelcase_upper(util.dump_suffix)))
else:
request_name = util.underscore_to_camelcase_upper(util.unconventional_naming_rep_req[func['name']]) \
if func['name'] in util.unconventional_naming_rep_req else util.remove_reply_suffix(camel_case_name_with_suffix)
methods.append(future_jvpp_method_template.substitute(base_package=base_package,
dto_package=dto_package,
method_name=camel_case_request_method_name,
reply_name=camel_case_name_with_suffix,
request_name=request_name))
methods_impl.append(future_jvpp_method_impl_template.substitute(base_package=base_package,
dto_package=dto_package,
method_name=camel_case_request_method_name,
reply_name=camel_case_name_with_suffix,
request_name=request_name))
# Callback handler is a bit special and a different template has to be used
if util.is_control_ping(camel_case_name_with_suffix):
callbacks.append(jvpp_facade_control_ping_method_template.substitute(base_package=base_package,
dto_package=dto_package,
callback_dto=camel_case_name_with_suffix,
future_package=future_facade_package))
else:
callbacks.append(jvpp_facade_callback_method_template.substitute(base_package=base_package,
dto_package=dto_package,
callback_dto=camel_case_name_with_suffix))
if util.is_notification(func["name"]):
callbacks.append(jvpp_facade_callback_notification_method_template.substitute(base_package=base_package,
dto_package=dto_package,
callback_dto=util.add_notification_suffix(camel_case_name_with_suffix)))
jvpp_file = open(os.path.join(future_facade_package, "FutureJVppFacadeCallback.java"), 'w')
jvpp_file.write(jvpp_facade_callback_template.substitute(inputfile=inputfile,
base_package=base_package,
dto_package=dto_package,
notification_package=notification_package,
callback_package=callback_package,
methods="".join(callbacks),
future_package=future_facade_package))
jvpp_file.flush()
jvpp_file.close()
jvpp_file = open(os.path.join(future_facade_package, "FutureJVpp.java"), 'w')
jvpp_file.write(future_jvpp_template.substitute(inputfile=inputfile,
base_package=base_package,
methods="".join(methods),
future_package=future_facade_package))
jvpp_file.flush()
jvpp_file.close()
jvpp_file = open(os.path.join(future_facade_package, "FutureJVppFacade.java"), 'w')
jvpp_file.write(future_jvpp_facade_template.substitute(inputfile=inputfile,
base_package=base_package,
dto_package=dto_package,
methods="".join(methods_impl),
future_package=future_facade_package))
jvpp_file.flush()
jvpp_file.close()
future_jvpp_template = Template('''
package $base_package.$future_package;
/**
* <p>Async facade extension adding specific methods for each request invocation
* <br>It was generated by jvpp_future_facade_gen.py based on $inputfile
* <br>(python representation of vpe.api generated by vppapigen).
*/
public interface FutureJVpp extends FutureJVppInvoker {
$methods
}
''')
future_jvpp_method_template = Template('''
java.util.concurrent.CompletionStage<$base_package.$dto_package.$reply_name> $method_name($base_package.$dto_package.$request_name request);
''')
future_jvpp_facade_template = Template('''
package $base_package.$future_package;
/**
* <p>Implementation of FutureJVpp based on FutureJVppInvokerFacade
* <br>It was generated by jvpp_future_facade_gen.py based on $inputfile
* <br>(python representation of vpe.api generated by vppapigen).
*/
public class FutureJVppFacade extends FutureJVppInvokerFacade implements FutureJVpp {
/**
* <p>Create FutureJVppFacade object for provided JVpp instance.
* Constructor internally creates FutureJVppFacadeCallback class for processing callbacks
* and then connects to provided JVpp instance
*
* @param jvpp provided $base_package.JVpp instance
*
* @throws java.io.IOException in case instance cannot connect to JVPP
*/
public FutureJVppFacade(final $base_package.JVpp jvpp) throws java.io.IOException {
super(jvpp, new java.util.HashMap<>());
jvpp.connect(new FutureJVppFacadeCallback(getRequests(), getNotificationCallback()));
}
$methods
}
''')
future_jvpp_method_impl_template = Template('''
@Override
public java.util.concurrent.CompletionStage<$base_package.$dto_package.$reply_name> $method_name($base_package.$dto_package.$request_name request) {
return send(request);
}
''')
# Returns request name or special one from unconventional_naming_rep_req map
def get_standard_dump_reply_name(camel_case_dto_name, func_name):
# FIXME this is a hotfix for sub-details callbacks
# FIXME also for L2FibTableEntry
# It's all because unclear mapping between
# request -> reply,
# dump -> reply, details,
# notification_start -> reply, notifications
# vpe.api needs to be "standardized" so we can parse the information and create maps before generating java code
suffix = func_name.split("_")[-1]
return util.underscore_to_camelcase_upper(
util.unconventional_naming_rep_req[func_name]) + util.underscore_to_camelcase_upper(suffix) if func_name in util.unconventional_naming_rep_req \
else camel_case_dto_name
|
|
input_data = open("newfile.txt", "r")
output_html = open("output.html", "w")
output_css = open("output.css", "w")
# Read in from file
elements = []
for line in input_data:
temp = line.split(" ")
elements.append(temp)
print temp
# HTML specific functions
def html_header():
output_html.write("<!DOCTYPE html>\n")
def html_open():
output_html.write("<html>\n")
def html_close():
output_html.write("</html>\n")
def head_open():
output_html.write("<head>\n")
def head_close():
output_html.write("</head>\n")
def body_open():
output_html.write("<body>\n")
def body_close():
output_html.write("</body>\n")
def div_open(value):
output_html.write("<div class=" + "\"" + value + "\""+ ">\n")
def div_close():
output_html.write("</div>\n")
def paragraph_open():
output_html.write("<p>\n")
def paragraph_close():
output_html.write("</p>\n")
def place_text(text):
output_html.write(text + "\n")
def header_open(value):
output_html.write("<h" + value + ">\n")
def header_close(value):
output_html.write("</h" + value + ">\n")
def button_html(value):
output_html.write("<a href=\"#\" class=\"btn btn-" + value + " btn-default\">Sample Button</a>\n")
def img_html(x, y):
output_html.write("<img src=\"http://www.placehold.it/" + x + "x" + y + "\">\n")
def html_includeCSS():
output_html.write("<link rel=\"stylesheet\" type=\"text/css\" href=\"output.css\">\n")
def html_baseSetup():
output_html.write("<meta charset=\"utf-8\">\n")
output_html.write("<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n")
output_html.write("<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n")
output_html.write("<meta name=\"description\" content=\"\">\n")
output_html.write("<meta name=\"author\" content=\"\">\n")
def html_baseCSSDependencies():
output_html.write("<link href=\"bootstrap.min.css\" rel=\"stylesheet\">\n")
def html_baseJSDependencies():
output_html.write("<script src=\"jquery.js\"></script>\n")
output_html.write("<script src=\"bootstrap.min.js\"></script>\n")
# CSS specific functions
def css_open(tag):
output_css.write("." + tag + " {\n")
def css_close():
output_css.write("}\n")
def css_color(value):
output_css.write("color: " + value + ";\n")
def css_backgroundColor(value):
output_css.write("background-color: " + value + ";\n")
def css_position(value):
output_css.write("position: " + value + ";\n")
def css_height(value):
output_css.write("height: " + value + "px;\n")
def css_width(value):
output_css.write("width: " + value + "px;\n")
def css_left(value):
output_css.write("left: " + value + "px;\n")
def css_top(value):
output_css.write("top: " + value + "px;\n")
html_header()
html_open()
head_open()
html_baseSetup()
html_includeCSS()
html_baseCSSDependencies()
head_close()
body_open()
print "starting loop"
for x in range(len(elements)):
print "iteration" + str(x)
temp = elements[x]
if (temp[0] == "text"):
idVal = "text" + str(x)
position = "absolute"
size = str(temp[1])
color = temp[2]
left = str(temp[3])
top = str(temp[4])
height = str(temp[5])
width = str(temp[6])
div_open("container")
div_open(idVal)
paragraph_open()
header_open(size)
place_text("Sample text")
header_close(size)
paragraph_close()
div_close()
div_close()
css_open(idVal)
css_position(position)
css_height(height)
css_width(width)
css_color(color)
css_left(left)
css_top(top)
css_close()
elif (temp[0] == "image"):
idVal = "image" + str(x)
position = "absolute"
left = str(temp[1])
top = str(temp[2])
height = str(temp[3])
width = str(temp[4])
div_open("container")
div_open(idVal)
img_html(width, height)
div_close()
div_close()
css_open(idVal)
css_position(position)
css_height(height)
css_width(width)
css_left(left)
css_top(top)
css_close()
elif (temp[0] == "button"):
idVal = "button" + str(x)
idVal2 = "btncolor" + str(x)
position = "absolute"
backgroundColor = temp[1]
color = temp[2]
left = str(temp[3])
top = str(temp[4])
height = str(temp[5])
width = str(temp[6])
div_open("container")
div_open(idVal)
button_html(idVal2)
div_close()
div_close()
css_open(idVal)
css_position(position)
css_height(height)
css_width(width)
css_left(left)
css_top(top)
css_close()
css_open(idVal2)
css_backgroundColor(backgroundColor)
css_color(color)
css_close()
else:
pass
html_baseJSDependencies()
body_close()
html_close()
output_html.close()
output_css.close()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for devappserver2.admin.datastore_viewer."""
import datetime
import os
import unittest
import google
import mox
import webapp2
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2.admin import admin_request_handler
from google.appengine.tools.devappserver2.admin import datastore_viewer
class PropertyNameToValuesTest(unittest.TestCase):
"""Tests for datastore_viewer._property_name_to_value(s)."""
def setUp(self):
self.app_id = 'myapp'
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['cat'] = 5
self.entity1['dog'] = 10
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['dog'] = 15
self.entity2['mouse'] = 'happy'
def test_property_name_to_values(self):
self.assertEqual({'cat': [5],
'dog': mox.SameElementsAs([10, 15]),
'mouse': ['happy']},
datastore_viewer._property_name_to_values([self.entity1,
self.entity2]))
def test_property_name_to_value(self):
self.assertEqual({'cat': 5,
'dog': mox.Func(lambda v: v in [10, 15]),
'mouse': 'happy'},
datastore_viewer._property_name_to_value([self.entity1,
self.entity2]))
class GetWriteOpsTest(unittest.TestCase):
"""Tests for DatastoreRequestHandler._get_write_ops."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so the puts done in the test code
# are seen immediately by the queries under test.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
application_root=None, # Needed to allow index updates.
datastore_consistency=consistent_policy)
def test_no_properties(self):
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_indexed_properties_no_composite_indexes(self):
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['p1'] = None # 2 writes.
entity['p2'] = None # 2 writes.
entity['p3'] = [1, 2, 3] # 6 writes.
self.assertEquals(
12, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_unindexed_properties_no_composite_indexes(self):
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['u1'] = None # 0 writes.
entity['u2'] = None # 0 writes.
entity['u3'] = [1, 2, 3] # 0 writes.
entity.set_unindexed_properties(('u1', 'u2', 'u3'))
# unindexed properties have no impact on cost
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_index(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(0)
index.set_entity_type('Yar')
prop = index.add_property()
prop.set_name('this')
prop.set_direction(prop.ASCENDING)
prop = index.add_property()
prop.set_name('that')
prop.set_direction(prop.DESCENDING)
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
# no properties, no composite indices.
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
# We only have the 2 built-in index writes because the entity doesn't have
# property values for any of the index properties.
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['this'] = 4
# Unindexed property so no additional writes
entity.set_unindexed_properties(('this',))
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = 4
# Unindexed property so no additional writes
entity.set_unindexed_properties(('this', 'that'))
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# no indexed property value on 'that'
entity.set_unindexed_properties(('that',))
# 2 writes for the entity.
# 2 writes for the single indexed property.
self.assertEquals(
4, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# indexed property value on both 'this' and 'that'
entity.set_unindexed_properties(())
# 2 writes for the entity
# 4 writes for the indexed properties
# 1 writes for the composite index
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# now run tests with null property values
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = None
# 2 for the entity
# 2 for the single indexed property
self.assertEquals(
4, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = None
# 2 for the entity
# 4 for the indexed properties
# 1 for the composite index
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# now run tests with a repeated property
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = [1, 2, 3]
# 2 for the entity
# 6 for the indexed properties
self.assertEquals(
8, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = None
# 2 for the entity
# 8 for the indexed properties
# 3 for the Composite index
self.assertEquals(
13, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = [4, 5]
# 2 for the entity
# 10 for the indexed properties
# 6 for the Composite index
self.assertEquals(
18, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_index_no_properties(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(0)
index.set_entity_type('Yar')
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
# no properties, and composite index with no properties.
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
# We have the 2 built-in index writes, and one for the entity key in the
# composite index despite the fact that there are no proerties defined in
# the index.
self.assertEquals(
3, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# now with a repeated property
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = [1, 2, 3]
# 2 for the entity
# 6 for the indexed properties
# 1 for the composite index
self.assertEquals(
9, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_ancestor_index(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(1)
index.set_entity_type('Yar')
prop = index.add_property()
prop.set_name('this')
prop.set_direction(prop.ASCENDING)
prop = index.add_property()
prop.set_name('that')
prop.set_direction(prop.DESCENDING)
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = 4
entity['that'] = 4
# 2 for the entity
# 4 for the indexed properties
# 1 for the composite index
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now use the same entity but give it an ancestor
parent_entity = datastore.Entity('parent', id=123, _app=self.app_id)
entity = datastore.Entity(
'Yar',
parent=parent_entity.key(),
id=123,
_app=self.app_id) # 2 writes.
entity['this'] = 4
entity['that'] = 4
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 2 writes for the composite indices.
self.assertEquals(
8, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now use the same entity but give it 2 ancestors.
grandparent_entity = datastore.Entity(
'grandparent', id=123, _app=self.app_id)
parent_entity = datastore.Entity(
'parent', parent=grandparent_entity.key(), id=123, _app=self.app_id)
entity = datastore.Entity(
'Yar',
parent=parent_entity.key(),
id=123,
_app=self.app_id) # 2 writes.
entity['this'] = 4
entity['that'] = 4
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 3 writes for the composite indices.
self.assertEquals(
9, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now try it with a multi-value prop
entity['this'] = [None, None, None]
# 2 writes for the entity.
# 8 writes for the indexed properties.
# 9 writes for the composite indices.
self.assertEquals(
19, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now try it with 2 multi-value props.
entity['that'] = [None, None]
# 2 writes for the entity.
# 10 writes for the indexed properties.
# 18 writes for the composite indices.
self.assertEquals(
30, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_ancestor_index_no_properties(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(1)
index.set_entity_type('Yar')
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = [None, None]
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 1 writes for the composite index.
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now use the same entity but give it an ancestor
parent_entity = datastore.Entity('parent', id=123, _app=self.app_id)
entity = datastore.Entity(
'Yar',
parent=parent_entity.key(),
id=123,
_app=self.app_id) # 2 writes.
entity['this'] = [None, None]
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 2 writes for the composite indices.
self.assertEquals(
8, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
class GetEntitiesTest(unittest.TestCase):
"""Tests for DatastoreRequestHandler._get_entities."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so the puts done in the test code
# are seen immediately by the queries under test.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
datastore_consistency=consistent_policy)
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['intprop'] = 1
self.entity1['listprop'] = [7, 8, 9]
datastore.Put(self.entity1)
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['stringprop'] = 'value2'
self.entity2['listprop'] = [4, 5, 6]
datastore.Put(self.entity2)
self.entity3 = datastore.Entity('Kind1', id=125, _app=self.app_id)
self.entity3['intprop'] = 3
self.entity3['stringprop'] = 'value3'
self.entity3['listprop'] = [1, 2, 3]
datastore.Put(self.entity3)
self.entity4 = datastore.Entity('Kind1', id=126, _app=self.app_id)
self.entity4['intprop'] = 4
self.entity4['stringprop'] = 'value4'
self.entity4['listprop'] = [10, 11, 12]
datastore.Put(self.entity4)
def test_ascending_int_order(self):
entities, total = datastore_viewer._get_entities(kind='Kind1',
namespace='',
order='intprop',
start=0,
count=100)
self.assertEqual([self.entity1, self.entity3, self.entity4], entities)
self.assertEqual(3, total)
def test_decending_string_order(self):
entities, total = datastore_viewer._get_entities(kind='Kind1',
namespace='',
order='-stringprop',
start=0,
count=100)
self.assertEqual([self.entity4, self.entity3, self.entity2], entities)
self.assertEqual(3, total)
def test_start_and_count(self):
entities, total = datastore_viewer._get_entities(kind='Kind1',
namespace='',
order='listprop',
start=1,
count=2)
self.assertEqual([self.entity2, self.entity1], entities)
self.assertEqual(4, total)
class GetEntityTemplateDataTest(unittest.TestCase):
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so the puts done in the test code
# are seen immediately by the queries under test.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
datastore_consistency=consistent_policy)
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['intprop'] = 1
self.entity1['listprop'] = [7, 8, 9]
datastore.Put(self.entity1)
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['stringprop'] = 'value2'
self.entity2['listprop'] = [4, 5, 6]
datastore.Put(self.entity2)
self.entity3 = datastore.Entity('Kind1', id=125, _app=self.app_id)
self.entity3['intprop'] = 3
self.entity3['listprop'] = [1, 2, 3]
datastore.Put(self.entity3)
self.entity4 = datastore.Entity('Kind1', id=126, _app=self.app_id)
self.entity4['intprop'] = 4
self.entity4['stringprop'] = 'value4'
self.entity4['listprop'] = [10, 11]
datastore.Put(self.entity4)
def test(self):
headers, entities, total_entities = (
datastore_viewer.DatastoreRequestHandler._get_entity_template_data(
request_uri='http://next/',
kind='Kind1',
namespace='',
order='intprop',
start=1))
self.assertEqual(
[{'name': 'intprop'}, {'name': 'listprop'}, {'name': 'stringprop'}],
headers)
self.assertEqual(
[{'attributes': [{'name': u'intprop',
'short_value': '3',
'value': '3'},
{'name': u'listprop',
'short_value': mox.Regex(r'\[1L?, 2L?, 3L?\]'),
'value': mox.Regex(r'\[1L?, 2L?, 3L?\]')},
{'name': u'stringprop',
'short_value': '',
'value': ''}],
'edit_uri': '/datastore/edit/{0}?next=http%3A//next/'.format(
self.entity3.key()),
'key': datastore_types.Key.from_path(u'Kind1', 125, _app=u'myapp'),
'key_id': 125,
'key_name': None,
'shortened_key': 'agVteWFw...',
'write_ops': 10},
{'attributes': [{'name': u'intprop',
'short_value': '4',
'value': '4'},
{'name': u'listprop',
'short_value': mox.Regex(r'\[10L?, 11L?\]'),
'value': mox.Regex(r'\[10L?, 11L?\]')},
{'name': u'stringprop',
'short_value': u'value4',
'value': u'value4'}],
'edit_uri': '/datastore/edit/{0}?next=http%3A//next/'.format(
self.entity4.key()),
'key': datastore_types.Key.from_path(u'Kind1', 126, _app=u'myapp'),
'key_id': 126,
'key_name': None,
'shortened_key': 'agVteWFw...',
'write_ops': 10}],
entities)
self.assertEqual(3, total_entities)
class DatastoreRequestHandlerGetTest(unittest.TestCase):
"""Tests for DatastoreRequestHandler.get."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
api_server.test_setup_stubs(app_id=self.app_id)
self.mox = mox.Mox()
self.mox.StubOutWithMock(admin_request_handler.AdminRequestHandler,
'render')
def tearDown(self):
self.mox.UnsetStubs()
def test_empty_request_and_empty_datastore(self):
request = webapp2.Request.blank('/datastore')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render('datastore_viewer.html',
{'entities': [],
'headers': [],
'kind': None,
'kinds': [],
'message': None,
'namespace': '',
'num_pages': 0,
'order': None,
'paging_base_url': '/datastore?',
'order_base_url': '/datastore?',
'page': 1,
'select_namespace_url': '/datastore?namespace=',
'show_namespace': False,
'start': 0,
'total_entities': 0})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_empty_request_and_populated_datastore(self):
entity = datastore.Entity('Kind1', id=123, _app=self.app_id)
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank('/datastore')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertEqual('http://localhost/datastore?kind=Kind1',
response.location)
def test_kind_request_and_populated_datastore(self):
entity = datastore.Entity('Kind1', id=123, _app=self.app_id)
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank('/datastore?kind=Kind1')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': '',
'num_pages': 1,
'order': None,
'order_base_url': '/datastore?kind=Kind1',
'page': 1,
'paging_base_url': '/datastore?kind=Kind1',
'select_namespace_url': '/datastore?kind=Kind1&namespace=',
'show_namespace': False,
'start': 0,
'total_entities': 1})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_order_request(self):
entity = datastore.Entity('Kind1', id=123, _app=self.app_id)
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank(
'/datastore?kind=Kind1&order=intprop')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': '',
'num_pages': 1,
'order': 'intprop',
'order_base_url': '/datastore?kind=Kind1',
'page': 1,
'paging_base_url': '/datastore?kind=Kind1&order=intprop',
'select_namespace_url':
'/datastore?kind=Kind1&namespace=&order=intprop',
'show_namespace': False,
'start': 0,
'total_entities': 1})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_namespace_request(self):
entity = datastore.Entity('Kind1',
id=123,
_app=self.app_id,
_namespace='google')
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank(
'/datastore?kind=Kind1&namespace=google')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': 'google',
'num_pages': 1,
'order': None,
'order_base_url': '/datastore?kind=Kind1&namespace=google',
'page': 1,
'paging_base_url': '/datastore?kind=Kind1&namespace=google',
'select_namespace_url':
'/datastore?kind=Kind1&namespace=google',
'show_namespace': True,
'start': 0,
'total_entities': 1})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_page_request(self):
for i in range(1000):
entity = datastore.Entity('Kind1', id=i+1, _app=self.app_id)
entity['intprop'] = i
datastore.Put(entity)
request = webapp2.Request.blank(
'/datastore?kind=Kind1&page=3')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': '',
'num_pages': 50,
'order': None,
'order_base_url': '/datastore?kind=Kind1&page=3',
'page': 3,
'paging_base_url': '/datastore?kind=Kind1',
'select_namespace_url':
'/datastore?kind=Kind1&namespace=&page=3',
'show_namespace': False,
'start': 40,
'total_entities': 1000})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
class DatastoreEditRequestHandlerTest(unittest.TestCase):
"""Tests for DatastoreEditRequestHandler."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so that the test can use queries
# to verify that an entity was written.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
datastore_consistency=consistent_policy)
self.mox = mox.Mox()
self.mox.StubOutWithMock(admin_request_handler.AdminRequestHandler,
'render')
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['intprop'] = 1
self.entity1['listprop'] = [7, 8, 9]
self.entity1['dateprop'] = datastore_types._OverflowDateTime(2**60)
datastore.Put(self.entity1)
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['stringprop'] = 'value2'
self.entity2['listprop'] = [4, 5, 6]
datastore.Put(self.entity2)
self.entity3 = datastore.Entity('Kind1', id=125, _app=self.app_id)
self.entity3['intprop'] = 3
self.entity3['listprop'] = [1, 2, 3]
datastore.Put(self.entity3)
self.entity4 = datastore.Entity('Kind1', id=126, _app=self.app_id)
self.entity4['intprop'] = 4
self.entity4['stringprop'] = 'value4'
self.entity4['listprop'] = [10, 11]
datastore.Put(self.entity4)
def tearDown(self):
self.mox.UnsetStubs()
def test_get_no_entity_key_string(self):
request = webapp2.Request.blank(
'/datastore/edit?kind=Kind1&next=http://next/')
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
handler.render(
'datastore_edit.html',
{'fields': [('dateprop',
'overflowdatetime',
mox.Regex('^<input class="overflowdatetime".*'
'value="".*$')),
('intprop',
'int',
mox.Regex('^<input class="int".*value="".*$')),
('listprop', 'list', ''),
('stringprop',
'string',
mox.Regex('^<input class="string".*$'))],
'key': None,
'key_id': None,
'key_name': None,
'kind': 'Kind1',
'namespace': '',
'next': 'http://next/',
'parent_key': None,
'parent_key_string': None})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_get_no_entity_key_string_and_no_entities_in_namespace(self):
request = webapp2.Request.blank(
'/datastore/edit?kind=Kind1&namespace=cat&next=http://next/')
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertRegexpMatches(
response.location,
r'/datastore\?kind=Kind1&message=Cannot+.*&namespace=cat')
def test_get_entity_string(self):
request = webapp2.Request.blank(
'/datastore/edit/%s?next=http://next/' % self.entity1.key())
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
handler.render(
'datastore_edit.html',
{'fields': [('dateprop',
'overflowdatetime',
mox.Regex('^<input class="overflowdatetime".*'
'value="1152921504606846976".*$')),
('intprop',
'int',
mox.Regex('^<input class="int".*value="1".*$')),
('listprop', 'list', mox.Regex(r'\[7L?, 8L?, 9L?\]'))],
'key': str(self.entity1.key()),
'key_id': 123,
'key_name': None,
'kind': 'Kind1',
'namespace': '',
'next': 'http://next/',
'parent_key': None,
'parent_key_string': None})
self.mox.ReplayAll()
handler.get(str(self.entity1.key()))
self.mox.VerifyAll()
def test_post_no_entity_key_string(self):
request = webapp2.Request.blank(
'/datastore/edit',
POST={'kind': 'Kind1',
'overflowdatetime|dateprop': '2009-12-24 23:59:59',
'int|intprop': '123',
'string|stringprop': 'Hello',
'next': 'http://redirect/'})
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
self.mox.ReplayAll()
handler.post()
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertEqual('http://redirect/', response.location)
# Check that the entity was added.
query = datastore.Query('Kind1')
query.update({'dateprop': datetime.datetime(2009, 12, 24, 23, 59, 59),
'intprop': 123,
'stringprop': 'Hello'})
self.assertEquals(1, query.Count())
def test_post_entity_key_string(self):
request = webapp2.Request.blank(
'/datastore/edit/%s' % self.entity4.key(),
POST={'overflowdatetime|dateprop': str(2**60),
'int|intprop': '123',
'string|stringprop': '',
'next': 'http://redirect/'})
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
self.mox.ReplayAll()
handler.post(str(self.entity4.key()))
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertEqual('http://redirect/', response.location)
# Check that the entity was updated.
entity = datastore.Get(self.entity4.key())
self.assertEqual(2**60, entity['dateprop'])
self.assertEqual(123, entity['intprop'])
self.assertEqual([10, 11], entity['listprop'])
self.assertNotIn('stringprop', entity)
if __name__ == '__main__':
unittest.main()
|
|
import os
import pygit2
from gitmodel.test import GitModelTestCase
class TestInstancesMixin(object):
def setUp(self):
super(TestInstancesMixin, self).setUp()
from gitmodel.test.fields import models
self.models = self.workspace.import_models(models)
self.person = self.models.Person(
slug='john-doe',
first_name='John',
last_name='Doe',
email='jdoe@example.com',
)
self.author = self.models.Author(
email='jdoe@example.com',
first_name='John',
last_name='Doe',
)
self.post = self.models.Post(
slug='test-post',
title='Test Post',
body='Lorem ipsum dolor sit amet',
)
class FieldValidationTest(TestInstancesMixin, GitModelTestCase):
def test_validate_not_empty(self):
# empty string on required field should trigger validationerror
self.person.last_name = ''
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
# None on required field should trigger validationerror
self.person.last_name = None
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_email(self):
self.person.email = 'foo_at_example.com'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_slug(self):
self.person.slug = 'Foo Bar'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_integer(self):
self.person.age = 20.5
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
self.person.age = 'twenty-one'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_float(self):
self.person.tax_rate = '5%'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
self.person.tax_rate = '1.2.3'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_decimal(self):
self.person.account_balance = 'one.two'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
self.person.account_balance = '1.2.3'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_date(self):
# valid iso-8601 date
self.person.birth_date = '1978-12-07'
self.person.save()
# not a valid iso-8601 date
self.person.birth_date = '12/7/1978'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_datetime(self):
# not a valid iso-8601 datetime
self.person.date_joined = '12/8/2012 4:53pm'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
def test_validate_time(self):
self.person.wake_up_call = '9am'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
self.person.wake_up_call = '2012-08-10 09:00'
with self.assertRaises(self.exceptions.ValidationError):
self.person.save()
class FieldTypeCheckingTest(TestInstancesMixin, GitModelTestCase):
def assertTypesMatch(self, field, test_values, type):
for value, eq_value in test_values.iteritems():
setattr(self.person, field, value)
self.person.save()
person = self.models.Person.get(self.person.id)
self.assertIsInstance(getattr(person, field), type)
self.assertEqual(getattr(person, field), eq_value)
def test_char(self):
from datetime import datetime
test_values = {
'John': 'John',
.007: '0.007',
datetime(2012, 12, 12): '2012-12-12 00:00:00'
}
self.assertTypesMatch('first_name', test_values, basestring)
def test_integer(self):
test_values = {33: 33, '33': 33}
self.assertTypesMatch('age', test_values, int)
def test_float(self):
test_values = {.825: .825, '0.825': .825}
self.assertTypesMatch('tax_rate', test_values, float)
def test_decimal(self):
from decimal import Decimal
test_values = {
'1.23': Decimal('1.23'),
'12.300': Decimal('12.3'),
1: Decimal('1.0')
}
self.assertTypesMatch('account_balance', test_values, Decimal)
def test_boolean(self):
test_values = {
True: True,
False: False,
1: True,
0: False,
None: False
}
self.assertTypesMatch('active', test_values, bool)
def test_date(self):
from datetime import date
test_values = {
'1978-12-7': date(1978, 12, 7),
'1850-05-05': date(1850, 5, 5),
}
self.assertTypesMatch('birth_date', test_values, date)
def test_datetime(self):
from datetime import datetime
from dateutil import tz
utc = tz.tzutc()
utc_offset = tz.tzoffset(None, -1 * 4 * 60 * 60)
test_values = {
'2012-05-30 14:32': datetime(2012, 5, 30, 14, 32),
'1820-8-13 9:23:48Z': datetime(1820, 8, 13, 9, 23, 48, 0, utc),
'2001-9-11 8:46:00-0400': datetime(2001, 9, 11, 8, 46, 0, 0,
utc_offset),
'2012-05-05 14:32:02.012345': datetime(2012, 5, 5, 14, 32, 2,
12345),
}
self.assertTypesMatch('date_joined', test_values, datetime)
# test a normal date
self.person.date_joined = '2012-01-01'
self.person.save()
person = self.models.Person.get(self.person.id)
self.assertEqual(type(person.date_joined), datetime)
self.assertEqual(person.date_joined, datetime(2012, 1, 1, 0, 0))
def test_time(self):
from datetime import time
from dateutil import tz
utc = tz.tzutc()
utc_offset = tz.tzoffset(None, -1 * 4 * 60 * 60)
test_values = {
'14:32': time(14, 32),
'9:23:48Z': time(9, 23, 48, 0, utc),
'8:46:00-0400': time(8, 46, 0, 0, utc_offset)
}
self.assertTypesMatch('wake_up_call', test_values, time)
class RelatedFieldTest(TestInstancesMixin, GitModelTestCase):
def test_related(self):
self.author.save()
self.post.author = self.author
self.post.save()
post_id = self.post.get_id()
post = self.models.Post.get(post_id)
self.assertTrue(post.author.get_id() == self.author.get_id())
class BlobFieldTest(TestInstancesMixin, GitModelTestCase):
def test_blob_field(self):
fd = open(os.path.join(os.path.dirname(__file__),
'git-logo-2color.png'))
self.author.save()
self.post.author = self.author
self.post.image = fd
self.post.save()
#make sure stored file and original file are identical
post = self.models.Post.get(self.post.get_id())
saved_content = post.image.read()
fd.seek(0)
control = fd.read()
self.assertEqual(saved_content, control,
"Saved blob does not match file")
class InheritedFieldTest(TestInstancesMixin, GitModelTestCase):
def test_inherited_local_fields(self):
user = self.models.User(
slug='john-doe',
first_name='John',
last_name='Doe',
email='jdoe@example.com',
password='secret'
)
user.save()
# get user
user_retreived = self.models.User.get(user.id)
self.assertEqual(user_retreived.password, 'secret')
def test_inherited_related_fields(self):
self.author.save()
self.post.author = self.author
self.post.save()
user = self.models.User(
slug='john-doe',
first_name='John',
last_name='Doe',
email='jdoe@example.com',
password='secret',
last_read=self.post
)
user.save()
# get user
user_retreived = self.models.User.get(user.id)
self.assertEqual(user_retreived.last_read.get_id(), self.post.get_id())
class JSONFieldTest(TestInstancesMixin, GitModelTestCase):
def test_json_field(self):
metadata = {
'foo': 'bar',
'baz': 'qux'
}
self.author.save()
self.post.author = self.author
self.post.metadata = metadata
self.post.save()
post = self.models.Post.get(self.post.slug)
self.assertIsInstance(post.metadata, dict)
self.assertDictEqual(post.metadata, metadata)
class GitObjectFieldTest(TestInstancesMixin, GitModelTestCase):
def test_gitobject_field(self):
repo = self.workspace.repo
test_commit = self.person.save(commit=True, message='Test Commit')
test_blob = repo[self.workspace.index[self.person.get_data_path()].oid]
test_tree = repo[test_commit].tree
obj = self.models.GitObjectTestModel(
blob=test_blob.oid,
commit=test_commit,
tree=test_tree.oid
)
obj.save()
self.assertIsInstance(obj.commit, pygit2.Commit)
self.assertEqual(obj.commit.oid, repo[test_commit].oid)
self.assertIsInstance(obj.blob, pygit2.Blob)
self.assertEqual(obj.blob.oid, test_blob.oid)
self.assertIsInstance(obj.tree, pygit2.Tree)
self.assertEqual(obj.tree.oid, test_tree.oid)
err = '"commit" must be a valid git OID'
with self.assertRaisesRegexp(self.exceptions.ValidationError, err):
obj.commit = 'foo'
obj.save()
err = '"commit" must point to a Commit'
with self.assertRaisesRegexp(self.exceptions.ValidationError, err):
obj.commit = test_tree.oid
obj.save()
class EmailFieldTest(TestInstancesMixin, GitModelTestCase):
def test_email_field(self):
invalid = '"email" must be a valid e-mail address'
with self.assertRaisesRegexp(self.exceptions.ValidationError, invalid):
self.author.email = 'jdoe[at]example.com'
self.author.save()
self.author.email = 'jdoe@example.com'
self.author.save()
id = self.author.id
author = self.models.Author.get(id)
self.assertEqual(author.email, 'jdoe@example.com')
class URLFieldTest(TestInstancesMixin, GitModelTestCase):
def test_url_field(self):
invalid = '"url" must be a valid URL'
invalid_scheme = '"url" scheme must be one of http, https'
with self.assertRaisesRegexp(self.exceptions.ValidationError, invalid):
self.author.url = 'http//example.com/foo'
self.author.save()
with self.assertRaisesRegexp(self.exceptions.ValidationError,
invalid_scheme):
self.author.url = 'ftp://example.com/foo'
self.author.save()
self.author.url = 'http://example.com/foo'
self.author.save()
id = self.author.id
author = self.models.Author.get(id)
self.assertEqual(author.url, 'http://example.com/foo')
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import eventlet
import Queue
import six
import time
from st2actions.container.service import RunnerContainerService
from st2actions.runners import get_runner
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.persistence.executionstate import ActionExecutionState
from st2common.persistence.liveaction import LiveAction
from st2common.services import executions
from st2common.util.action_db import (get_action_by_ref, get_runnertype_by_name)
from st2common.util import date as date_utils
LOG = logging.getLogger(__name__)
__all__ = [
'Querier',
'QueryContext'
]
@six.add_metaclass(abc.ABCMeta)
class Querier(object):
def __init__(self, threads_pool_size=10, query_interval=1, empty_q_sleep_time=5,
no_workers_sleep_time=1, container_service=None):
self._query_threads_pool_size = threads_pool_size
self._query_contexts = Queue.Queue()
self._thread_pool = eventlet.GreenPool(self._query_threads_pool_size)
self._empty_q_sleep_time = empty_q_sleep_time
self._no_workers_sleep_time = no_workers_sleep_time
self._query_interval = query_interval
if not container_service:
container_service = RunnerContainerService()
self.container_service = container_service
self._started = False
def start(self):
self._started = True
while True:
while self._query_contexts.empty():
eventlet.greenthread.sleep(self._empty_q_sleep_time)
while self._thread_pool.free() <= 0:
eventlet.greenthread.sleep(self._no_workers_sleep_time)
self._fire_queries()
def add_queries(self, query_contexts=None):
if query_contexts is None:
query_contexts = []
LOG.debug('Adding queries to querier: %s' % query_contexts)
for query_context in query_contexts:
self._query_contexts.put((time.time(), query_context))
def is_started(self):
return self._started
def _fire_queries(self):
if self._thread_pool.free() <= 0:
return
while not self._query_contexts.empty() and self._thread_pool.free() > 0:
(last_query_time, query_context) = self._query_contexts.get_nowait()
if time.time() - last_query_time < self._query_interval:
self._query_contexts.put((last_query_time, query_context))
continue
else:
self._thread_pool.spawn(self._query_and_save_results, query_context)
def _query_and_save_results(self, query_context):
execution_id = query_context.execution_id
actual_query_context = query_context.query_context
LOG.debug('Querying external service for results. Context: %s' % actual_query_context)
try:
(status, results) = self.query(execution_id, actual_query_context)
except:
LOG.exception('Failed querying results for liveaction_id %s.', execution_id)
self._delete_state_object(query_context)
LOG.debug('Remove state object %s.', query_context)
return
liveaction_db = None
try:
liveaction_db = self._update_action_results(execution_id, status, results)
except Exception:
LOG.exception('Failed updating action results for liveaction_id %s', execution_id)
self._delete_state_object(query_context)
return
if status in action_constants.LIVEACTION_COMPLETED_STATES:
action_db = get_action_by_ref(liveaction_db.action)
if not action_db:
LOG.exception('Unable to invoke post run. Action %s '
'no longer exists.' % liveaction_db.action)
self._delete_state_object(query_context)
return
if status != action_constants.LIVEACTION_STATUS_CANCELED:
self._invoke_post_run(liveaction_db, action_db)
self._delete_state_object(query_context)
return
self._query_contexts.put((time.time(), query_context))
def _update_action_results(self, execution_id, status, results):
liveaction_db = LiveAction.get_by_id(execution_id)
if not liveaction_db:
raise Exception('No DB model for liveaction_id: %s' % execution_id)
if liveaction_db.status != action_constants.LIVEACTION_STATUS_CANCELED:
liveaction_db.status = status
liveaction_db.result = results
# Action has completed, record end_timestamp
if (liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES and
not liveaction_db.end_timestamp):
liveaction_db.end_timestamp = date_utils.get_datetime_utc_now()
# update liveaction, update actionexecution and then publish update.
updated_liveaction = LiveAction.add_or_update(liveaction_db, publish=False)
executions.update_execution(updated_liveaction)
LiveAction.publish_update(updated_liveaction)
return updated_liveaction
def _invoke_post_run(self, actionexec_db, action_db):
LOG.info('Invoking post run for action execution %s. Action=%s; Runner=%s',
actionexec_db.id, action_db.name, action_db.runner_type['name'])
# Get an instance of the action runner.
runnertype_db = get_runnertype_by_name(action_db.runner_type['name'])
runner = get_runner(runnertype_db.runner_module)
# Configure the action runner.
runner.container_service = RunnerContainerService()
runner.action = action_db
runner.action_name = action_db.name
runner.action_execution_id = str(actionexec_db.id)
runner.entry_point = RunnerContainerService.get_entry_point_abs_path(
pack=action_db.pack, entry_point=action_db.entry_point)
runner.context = getattr(actionexec_db, 'context', dict())
runner.callback = getattr(actionexec_db, 'callback', dict())
runner.libs_dir_path = RunnerContainerService.get_action_libs_abs_path(
pack=action_db.pack, entry_point=action_db.entry_point)
# Invoke the post_run method.
runner.post_run(actionexec_db.status, actionexec_db.result)
def _delete_state_object(self, query_context):
state_db = ActionExecutionState.get_by_id(query_context.id)
if state_db is not None:
try:
LOG.info('Clearing state object: %s', state_db)
ActionExecutionState.delete(state_db)
except:
LOG.exception('Failed clearing state object: %s', state_db)
def query(self, execution_id, query_context):
"""
This is the method individual queriers must implement.
This method should return a tuple of (status, results).
status should be one of LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_RUNNING,
LIVEACTION_STATUS_FAILED defined in st2common.constants.action.
"""
pass
def print_stats(self):
LOG.info('\t --- Name: %s, pending queuries: %d', self.__class__.__name__,
self._query_contexts.qsize())
class QueryContext(object):
def __init__(self, obj_id, execution_id, query_context, query_module):
self.id = obj_id
self.execution_id = execution_id
self.query_context = query_context
self.query_module = query_module
@classmethod
def from_model(cls, model):
return QueryContext(str(model.id), str(model.execution_id), model.query_context,
model.query_module)
def __repr__(self):
return ('<QueryContext id=%s,execution_id=%s,query_context=%s>' %
(self.id, self.execution_id, self.query_context))
|
|
from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE
import new
import re
import weakref
import _base
from html5lib import constants, ihatexml
from html5lib.constants import namespaces
moduleCache = {}
def getDomModule(DomImplementation):
name = "_" + DomImplementation.__name__+"builder"
if name in moduleCache:
return moduleCache[name]
else:
mod = new.module(name)
objs = getDomBuilder(DomImplementation)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList:
def __init__(self, element):
self.element = element
def __iter__(self):
return self.element.attributes.items().__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def items(self):
return [(item[0], item[1]) for item in
self.element.attributes.items()]
def keys(self):
return self.element.attributes.keys()
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self:hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in attributes.items():
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None,None,None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data=data
if parent <> self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if not Node.TEXT_NODE in self.dom._child_node_types:
self.dom._child_node_types=list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append( """|%s<!DOCTYPE %s "%s" "%s">"""%(
' '*indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>"%(' '*indent, element.name))
else:
rv.append("|%s<!DOCTYPE >"%(' '*indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->"%(' '*indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" %(' '*indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI != None):
name = "%s %s"%(constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>"%(' '*indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s"%(constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
def dom2sax(node, handler, nsmap={'xml':XML_NAMESPACE}):
if node.nodeType == Node.ELEMENT_NODE:
if not nsmap:
handler.startElement(node.nodeName, node.attributes)
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endElement(node.nodeName)
else:
attributes = dict(node.attributes.itemsNS())
# gather namespace declarations
prefixes = []
for attrname in node.attributes.keys():
attr = node.getAttributeNode(attrname)
if (attr.namespaceURI == XMLNS_NAMESPACE or
(attr.namespaceURI == None and attr.nodeName.startswith('xmlns'))):
prefix = (attr.nodeName != 'xmlns' and attr.nodeName or None)
handler.startPrefixMapping(prefix, attr.nodeValue)
prefixes.append(prefix)
nsmap = nsmap.copy()
nsmap[prefix] = attr.nodeValue
del attributes[(attr.namespaceURI, attr.nodeName)]
# apply namespace declarations
for attrname in node.attributes.keys():
attr = node.getAttributeNode(attrname)
if attr.namespaceURI == None and ':' in attr.nodeName:
prefix = attr.nodeName.split(':')[0]
if nsmap.has_key(prefix):
del attributes[(attr.namespaceURI, attr.nodeName)]
attributes[(nsmap[prefix],attr.nodeName)]=attr.nodeValue
# SAX events
ns = node.namespaceURI or nsmap.get(None,None)
handler.startElementNS((ns,node.nodeName), node.nodeName, attributes)
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endElementNS((ns, node.nodeName), node.nodeName)
for prefix in prefixes: handler.endPrefixMapping(prefix)
elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
handler.characters(node.nodeValue)
elif node.nodeType == Node.DOCUMENT_NODE:
handler.startDocument()
for child in node.childNodes: dom2sax(child, handler, nsmap)
handler.endDocument()
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
for child in node.childNodes: dom2sax(child, handler, nsmap)
else:
# ATTRIBUTE_NODE
# ENTITY_NODE
# PROCESSING_INSTRUCTION_NODE
# COMMENT_NODE
# DOCUMENT_TYPE_NODE
# NOTATION_NODE
pass
return locals()
# Keep backwards compatibility with things that directly load
# classes/functions from this module
for key, value in getDomModule(minidom).__dict__.items():
globals()[key] = value
|
|
from __future__ import absolute_import, division, print_function
import warnings
from collections import defaultdict
from threading import Lock
from distutils.version import LooseVersion
import numpy as np
from toolz import pluck
from scipy import sparse
from dask.base import normalize_token
from sklearn.exceptions import FitFailedWarning
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.utils import safe_indexing
from sklearn.utils.fixes import rankdata
from sklearn.utils.validation import check_consistent_length, _is_arraylike
from .utils import copy_estimator
# Copied from scikit-learn/sklearn/utils/fixes.py, can be removed once we drop
# support for scikit-learn < 0.18.1 or numpy < 1.12.0.
if LooseVersion(np.__version__) < '1.12.0':
class MaskedArray(np.ma.MaskedArray):
# Before numpy 1.12, np.ma.MaskedArray object is not picklable
# This fix is needed to make our model_selection.GridSearchCV
# picklable as the ``cv_results_`` param uses MaskedArray
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
data_state = super(np.ma.MaskedArray, self).__reduce__()[2]
return data_state + (np.ma.getmaskarray(self).tostring(cf),
self._fill_value)
else:
from numpy.ma import MaskedArray # noqa
# A singleton to indicate a missing parameter
MISSING = type('MissingParameter', (object,),
{'__slots__': (),
'__reduce__': lambda self: 'MISSING',
'__doc__': "A singleton to indicate a missing parameter"})()
normalize_token.register(type(MISSING), lambda x: 'MISSING')
# A singleton to indicate a failed estimator fit
FIT_FAILURE = type('FitFailure', (object,),
{'__slots__': (),
'__reduce__': lambda self: 'FIT_FAILURE',
'__doc__': "A singleton to indicate fit failure"})()
def warn_fit_failure(error_score, e):
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
# ----------------------- #
# Functions in the graphs #
# ----------------------- #
class CVCache(object):
def __init__(self, splits, pairwise=False, cache=True):
self.splits = splits
self.pairwise = pairwise
self.cache = {} if cache else None
def __reduce__(self):
return (CVCache, (self.splits, self.pairwise, self.cache is not None))
def num_test_samples(self):
return np.array([i.sum() if i.dtype == bool else len(i)
for i in pluck(1, self.splits)])
def extract(self, X, y, n, is_x=True, is_train=True):
if is_x:
if self.pairwise:
return self._extract_pairwise(X, y, n, is_train=is_train)
return self._extract(X, y, n, is_x=True, is_train=is_train)
if y is None:
return None
return self._extract(X, y, n, is_x=False, is_train=is_train)
def extract_param(self, key, x, n):
if self.cache is not None and (n, key) in self.cache:
return self.cache[n, key]
out = safe_indexing(x, self.splits[n][0]) if _is_arraylike(x) else x
if self.cache is not None:
self.cache[n, key] = out
return out
def _extract(self, X, y, n, is_x=True, is_train=True):
if self.cache is not None and (n, is_x, is_train) in self.cache:
return self.cache[n, is_x, is_train]
inds = self.splits[n][0] if is_train else self.splits[n][1]
result = safe_indexing(X if is_x else y, inds)
if self.cache is not None:
self.cache[n, is_x, is_train] = result
return result
def _extract_pairwise(self, X, y, n, is_train=True):
if self.cache is not None and (n, True, is_train) in self.cache:
return self.cache[n, True, is_train]
if not hasattr(X, "shape"):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
train, test = self.splits[n]
result = X[np.ix_(train if is_train else test, train)]
if self.cache is not None:
self.cache[n, True, is_train] = result
return result
def cv_split(cv, X, y, groups, is_pairwise, cache):
check_consistent_length(X, y, groups)
return CVCache(list(cv.split(X, y, groups)), is_pairwise, cache)
def cv_n_samples(cvs):
return cvs.num_test_samples()
def cv_extract(cvs, X, y, is_X, is_train, n):
return cvs.extract(X, y, n, is_X, is_train)
def cv_extract_params(cvs, keys, vals, n):
return {k: cvs.extract_param(tok, v, n) for (k, tok), v in zip(keys, vals)}
def decompress_params(fields, params):
return [{k: v for k, v in zip(fields, p) if v is not MISSING}
for p in params]
def pipeline(names, steps):
"""Reconstruct a Pipeline from names and steps"""
if any(s is FIT_FAILURE for s in steps):
return FIT_FAILURE
return Pipeline(list(zip(names, steps)))
def feature_union(names, steps, weights):
"""Reconstruct a FeatureUnion from names, steps, and weights"""
if any(s is FIT_FAILURE for s in steps):
return FIT_FAILURE
return FeatureUnion(list(zip(names, steps)),
transformer_weights=weights)
def feature_union_concat(Xs, nsamples, weights):
"""Apply weights and concatenate outputs from a FeatureUnion"""
if any(x is FIT_FAILURE for x in Xs):
return FIT_FAILURE
Xs = [X if w is None else X * w for X, w in zip(Xs, weights)
if X is not None]
if not Xs:
return np.zeros((nsamples, 0))
if any(sparse.issparse(f) for f in Xs):
return sparse.hstack(Xs).tocsr()
return np.hstack(Xs)
# Current set_params isn't threadsafe
SET_PARAMS_LOCK = Lock()
def set_params(est, fields=None, params=None, copy=True):
if copy:
est = copy_estimator(est)
if fields is None:
return est
params = {f: p for (f, p) in zip(fields, params) if p is not MISSING}
# TODO: rewrite set_params to avoid lock for classes that use the standard
# set_params/get_params methods
with SET_PARAMS_LOCK:
return est.set_params(**params)
def fit(est, X, y, error_score='raise', fields=None, params=None,
fit_params=None):
if est is FIT_FAILURE or X is FIT_FAILURE:
return FIT_FAILURE
if not fit_params:
fit_params = {}
try:
est = set_params(est, fields, params)
est.fit(X, y, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
warn_fit_failure(error_score, e)
est = FIT_FAILURE
return est
def fit_transform(est, X, y, error_score='raise', fields=None, params=None,
fit_params=None):
if est is FIT_FAILURE or X is FIT_FAILURE:
return FIT_FAILURE, FIT_FAILURE
if not fit_params:
fit_params = {}
try:
est = set_params(est, fields, params)
if hasattr(est, 'fit_transform'):
Xt = est.fit_transform(X, y, **fit_params)
else:
est.fit(X, y, **fit_params)
Xt = est.transform(X)
except Exception as e:
if error_score == 'raise':
raise
warn_fit_failure(error_score, e)
est = Xt = FIT_FAILURE
return est, Xt
def _score(est, X, y, scorer):
if est is FIT_FAILURE:
return FIT_FAILURE
return scorer(est, X) if y is None else scorer(est, X, y)
def score(est, X_test, y_test, X_train, y_train, scorer):
test_score = _score(est, X_test, y_test, scorer)
if X_train is None:
return test_score
train_score = _score(est, X_train, y_train, scorer)
return test_score, train_score
def fit_and_score(est, cv, X, y, n, scorer,
error_score='raise', fields=None, params=None,
fit_params=None, return_train_score=True):
X_train = cv.extract(X, y, n, True, True)
y_train = cv.extract(X, y, n, False, True)
X_test = cv.extract(X, y, n, True, False)
y_test = cv.extract(X, y, n, False, False)
est = fit(est, X_train, y_train, error_score, fields, params, fit_params)
if not return_train_score:
X_train = y_train = None
return score(est, X_test, y_test, X_train, y_train, scorer)
def _store(results, key_name, array, n_splits, n_candidates,
weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by n_splits and then by parameters
array = np.array(array, dtype=np.float64).reshape(n_splits, n_candidates).T
if splits:
for split_i in range(n_splits):
results["split%d_%s" % (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
def create_cv_results(scores, candidate_params, n_splits, error_score, weights):
if isinstance(scores[0], tuple):
test_scores, train_scores = zip(*scores)
else:
test_scores = scores
train_scores = None
test_scores = [error_score if s is FIT_FAILURE else s for s in test_scores]
if train_scores is not None:
train_scores = [error_score if s is FIT_FAILURE else s
for s in train_scores]
# Construct the `cv_results_` dictionary
results = {'params': candidate_params}
n_candidates = len(candidate_params)
if weights is not None:
weights = np.broadcast_to(weights[None, :],
(len(candidate_params), len(weights)))
_store(results, 'test_score', test_scores, n_splits, n_candidates,
splits=True, rank=True, weights=weights)
if train_scores is not None:
_store(results, 'train_score', train_scores,
n_splits, n_candidates, splits=True)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(lambda: MaskedArray(np.empty(n_candidates),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
return results
def get_best_params(candidate_params, cv_results):
best_index = np.flatnonzero(cv_results["rank_test_score"] == 1)[0]
return candidate_params[best_index]
def fit_best(estimator, params, X, y, fit_params):
estimator = copy_estimator(estimator).set_params(**params)
estimator.fit(X, y, **fit_params)
return estimator
|
|
# -*- coding: ISO-8859-1 -*-
from .MidiOutStream import MidiOutStream
from .RawOutstreamFile import RawOutstreamFile
from .constants import *
from .DataTypeConverters import fromBytes, writeVar
class MidiOutFile(MidiOutStream):
"""
MidiOutFile is an eventhandler that subclasses MidiOutStream.
"""
def __init__(self, raw_out=''):
self.raw_out = RawOutstreamFile(raw_out)
MidiOutStream.__init__(self)
def write(self):
self.raw_out.write()
def event_slice(self, slc):
"""
Writes the slice of an event to the current track. Correctly
inserting a varlen timestamp too.
"""
trk = self._current_track_buffer
trk.writeVarLen(self.rel_time())
trk.writeSlice(slc)
#####################
## Midi events
def note_on(self, channel=0, note=0x40, velocity=0x40):
"""
channel: 0-15
note, velocity: 0-127
"""
slc = fromBytes([NOTE_ON + channel, note, velocity])
self.event_slice(slc)
def note_off(self, channel=0, note=0x40, velocity=0x40):
"""
channel: 0-15
note, velocity: 0-127
"""
slc = fromBytes([NOTE_OFF + channel, note, velocity])
self.event_slice(slc)
def aftertouch(self, channel=0, note=0x40, velocity=0x40):
"""
channel: 0-15
note, velocity: 0-127
"""
slc = fromBytes([AFTERTOUCH + channel, note, velocity])
self.event_slice(slc)
def continuous_controller(self, channel, controller, value):
"""
channel: 0-15
controller, value: 0-127
"""
slc = fromBytes([CONTINUOUS_CONTROLLER + channel, controller, value])
self.event_slice(slc)
# These should probably be implemented
# http://users.argonet.co.uk/users/lenny/midi/tech/spec.html#ctrlnums
def patch_change(self, channel, patch):
"""
channel: 0-15
patch: 0-127
"""
slc = fromBytes([PATCH_CHANGE + channel, patch])
self.event_slice(slc)
def channel_pressure(self, channel, pressure):
"""
channel: 0-15
pressure: 0-127
"""
slc = fromBytes([CHANNEL_PRESSURE + channel, pressure])
self.event_slice(slc)
def pitch_bend(self, channel, value):
"""
channel: 0-15
value: 0-16383
"""
msb = (value>>7) & 0xFF
lsb = value & 0xFF
slc = fromBytes([PITCH_BEND + channel, msb, lsb])
self.event_slice(slc)
#####################
## System Exclusive
# def sysex_slice(sysex_type, data):
# ""
# sysex_len = writeVar(len(data)+1)
# self.event_slice(SYSTEM_EXCLUSIVE + sysex_len + data + END_OFF_EXCLUSIVE)
#
def system_exclusive(self, data):
"""
data: list of values in range(128)
"""
sysex_len = writeVar(len(data)+1)
self.event_slice(chr(SYSTEM_EXCLUSIVE) + sysex_len + data + chr(END_OFF_EXCLUSIVE))
#####################
## Common events
def midi_time_code(self, msg_type, values):
"""
msg_type: 0-7
values: 0-15
"""
value = (msg_type<<4) + values
self.event_slice(fromBytes([MIDI_TIME_CODE, value]))
def song_position_pointer(self, value):
"""
value: 0-16383
"""
lsb = (value & 0x7F)
msb = (value >> 7) & 0x7F
self.event_slice(fromBytes([SONG_POSITION_POINTER, lsb, msb]))
def song_select(self, songNumber):
"""
songNumber: 0-127
"""
self.event_slice(fromBytes([SONG_SELECT, songNumber]))
def tuning_request(self):
"""
No values passed
"""
self.event_slice(chr(TUNING_REQUEST))
#########################
# header does not really belong here. But anyhoo!!!
def header(self, format=0, nTracks=1, division=96):
"""
format: type of midi file in [0,1,2]
nTracks: number of tracks. 1 track for type 0 file
division: timing division ie. 96 ppq.
"""
raw = self.raw_out
raw.writeSlice('MThd')
bew = raw.writeBew
bew(6, 4) # header size
bew(format, 2)
bew(nTracks, 2)
bew(division, 2)
def eof(self):
"""
End of file. No more events to be processed.
"""
# just write the file then.
self.write()
#####################
## meta events
def meta_slice(self, meta_type, data_slice):
"Writes a meta event"
slc = fromBytes([META_EVENT, meta_type]) + \
writeVar(len(data_slice)) + data_slice
self.event_slice(slc)
def meta_event(self, meta_type, data):
"""
Handles any undefined meta events
"""
self.meta_slice(meta_type, fromBytes(data))
def start_of_track(self, n_track=0):
"""
n_track: number of track
"""
self._current_track_buffer = RawOutstreamFile()
self.reset_time()
self._current_track += 1
def end_of_track(self):
"""
Writes the track to the buffer.
"""
raw = self.raw_out
raw.writeSlice(TRACK_HEADER)
track_data = self._current_track_buffer.getvalue()
# wee need to know size of track data.
eot_slice = writeVar(self.rel_time()) + fromBytes([META_EVENT, END_OF_TRACK, 0])
raw.writeBew(len(track_data)+len(eot_slice), 4)
# then write
raw.writeSlice(track_data)
raw.writeSlice(eot_slice)
def sequence_number(self, value):
"""
value: 0-65535
"""
self.meta_slice(meta_type, writeBew(value, 2))
def text(self, text):
"""
Text event
text: string
"""
self.meta_slice(TEXT, text)
def copyright(self, text):
"""
Copyright notice
text: string
"""
self.meta_slice(COPYRIGHT, text)
def sequence_name(self, text):
"""
Sequence/track name
text: string
"""
self.meta_slice(SEQUENCE_NAME, text)
def instrument_name(self, text):
"""
text: string
"""
self.meta_slice(INSTRUMENT_NAME, text)
def lyric(self, text):
"""
text: string
"""
self.meta_slice(LYRIC, text)
def marker(self, text):
"""
text: string
"""
self.meta_slice(MARKER, text)
def cuepoint(self, text):
"""
text: string
"""
self.meta_slice(CUEPOINT, text)
def midi_ch_prefix(self, channel):
"""
channel: midi channel for subsequent data
(deprecated in the spec)
"""
self.meta_slice(MIDI_CH_PREFIX, chr(channel))
def midi_port(self, value):
"""
value: Midi port (deprecated in the spec)
"""
self.meta_slice(MIDI_CH_PREFIX, chr(value))
def tempo(self, value):
"""
value: 0-2097151
tempo in us/quarternote
(to calculate value from bpm: int(60,000,000.00 / BPM))
"""
hb, mb, lb = (value>>16 & 0xff), (value>>8 & 0xff), (value & 0xff)
self.meta_slice(TEMPO, fromBytes([hb, mb, lb]))
def smtp_offset(self, hour, minute, second, frame, framePart):
"""
hour,
minute,
second: 3 bytes specifying the hour (0-23), minutes (0-59) and
seconds (0-59), respectively. The hour should be
encoded with the SMPTE format, just as it is in MIDI
Time Code.
frame: A byte specifying the number of frames per second (one
of : 24, 25, 29, 30).
framePart: A byte specifying the number of fractional frames,
in 100ths of a frame (even in SMPTE-based tracks
using a different frame subdivision, defined in the
MThd chunk).
"""
self.meta_slice(SMTP_OFFSET, fromBytes([hour, minute, second, frame, framePart]))
def time_signature(self, nn, dd, cc, bb):
"""
nn: Numerator of the signature as notated on sheet music
dd: Denominator of the signature as notated on sheet music
The denominator is a negative power of 2: 2 = quarter
note, 3 = eighth, etc.
cc: The number of MIDI clocks in a metronome click
bb: The number of notated 32nd notes in a MIDI quarter note
(24 MIDI clocks)
"""
self.meta_slice(TIME_SIGNATURE, fromBytes([nn, dd, cc, bb]))
def key_signature(self, sf, mi):
"""
sf: is a byte specifying the number of flats (-ve) or sharps
(+ve) that identifies the key signature (-7 = 7 flats, -1
= 1 flat, 0 = key of C, 1 = 1 sharp, etc).
mi: is a byte specifying a major (0) or minor (1) key.
"""
self.meta_slice(KEY_SIGNATURE, fromBytes([sf, mi]))
def sequencer_specific(self, data):
"""
data: The data as byte values
"""
self.meta_slice(SEQUENCER_SPECIFIC, data)
# #####################
# ## realtime events
# These are of no use in a midi file, so they are ignored!!!
# def timing_clock(self):
# def song_start(self):
# def song_stop(self):
# def song_continue(self):
# def active_sensing(self):
# def system_reset(self):
if __name__ == '__main__':
out_file = 'test/midifiles/midiout.mid'
midi = MidiOutFile(out_file)
#format: 0, nTracks: 1, division: 480
#----------------------------------
#
#Start - track #0
#sequence_name: Type 0
#tempo: 500000
#time_signature: 4 2 24 8
#note_on - ch:00, note:48, vel:64 time:0
#note_off - ch:00, note:48, vel:40 time:480
#End of track
#
#End of file
midi.header(0, 1, 480)
midi.start_of_track()
midi.sequence_name('Type 0')
midi.tempo(750000)
midi.time_signature(4, 2, 24, 8)
ch = 0
for i in range(127):
midi.note_on(ch, i, 0x64)
midi.update_time(96)
midi.note_off(ch, i, 0x40)
midi.update_time(0)
midi.update_time(0)
midi.end_of_track()
midi.eof() # currently optional, should it do the write instead of write??
midi.write()
|
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Simple protein analysis.
Example,
X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGTRDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEECLFLERLEENHYNTYTSKKHAEKNWFVGLKKNGSCKRGPRTHYGQKAILFLPLPV")
print X.count_amino_acids()
print X.get_amino_acids_percent()
print X.molecular_weight()
print X.aromaticity()
print X.instability_index()
print X.flexibility()
print X.isoelectric_point()
print X.secondary_structure_fraction()
print X.protein_scale(ProtParamData.kd, 9, 0.4)
"""
import sys
import ProtParamData, IsoelectricPoint
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
class ProteinAnalysis(object):
"""Class containing methods for protein analysis.
The constructor takes one argument: the protein sequence as a
string and builds a sequence object using the Bio.Seq module. This is done
just to make sure the sequence is a protein sequence and not anything else.
"""
def __init__(self, prot_sequence):
if prot_sequence.islower():
self.sequence = Seq(prot_sequence.upper(), IUPAC.protein)
else:
self.sequence = Seq(prot_sequence, IUPAC.protein)
self.amino_acids_content = None
self.amino_acids_percent = None
self.length = len(self.sequence)
def count_amino_acids(self):
"""Count standard amino acids, returns a dict.
Counts the number times each amino acid is in the protein
sequence. Returns a dictionary {AminoAcid:Number}.
The return value is cached in self.amino_acids_content.
It is not recalculated upon subsequent calls.
"""
if self.amino_acids_content is None:
prot_dic = dict([(k, 0) for k in IUPACData.protein_letters])
for aa in prot_dic:
prot_dic[aa] = self.sequence.count(aa)
self.amino_acids_content = prot_dic
return self.amino_acids_content
def get_amino_acids_percent(self):
"""Calculate the amino acid content in percentages.
The same as count_amino_acids only returns the Number in percentage of
entire sequence. Returns a dictionary of {AminoAcid:percentage}.
The return value is cached in self.amino_acids_percent.
input is the dictionary self.amino_acids_content.
output is a dictionary with amino acids as keys.
"""
if self.amino_acids_percent is None:
aa_counts = self.count_amino_acids()
percentages = {}
for aa in aa_counts:
percentages[aa] = aa_counts[aa] / float(self.length)
self.amino_acids_percent = percentages
return self.amino_acids_percent
def molecular_weight (self):
"""Calculate MW from Protein sequence"""
# make local dictionary for speed
aa_weights = {}
for i in IUPACData.protein_weights:
# remove a molecule of water from the amino acid weight
aa_weights[i] = IUPACData.protein_weights[i] - 18.02
total_weight = 18.02 # add just one water molecule for the whole sequence
for aa in self.sequence:
total_weight += aa_weights[aa]
return total_weight
def aromaticity(self):
"""Calculate the aromaticity according to Lobry, 1994.
Calculates the aromaticity value of a protein according to Lobry, 1994.
It is simply the relative frequency of Phe+Trp+Tyr.
"""
aromatic_aas = 'YWF'
aa_percentages = self.get_amino_acids_percent()
aromaticity = sum([aa_percentages[aa] for aa in aromatic_aas])
return aromaticity
def instability_index(self):
"""Calculate the instability index according to Guruprasad et al 1990.
Implementation of the method of Guruprasad et al. 1990 to test a
protein for stability. Any value above 40 means the protein is unstable
(has a short half life).
See: Guruprasad K., Reddy B.V.B., Pandit M.W.
Protein Engineering 4:155-161(1990).
"""
index = ProtParamData.DIWV
score = 0.0
for i in range(self.length - 1):
this, next = self.sequence[i:i+2]
dipeptide_value = index[this][next]
score += dipeptide_value
return (10.0 / self.length) * score
def flexibility(self):
"""Calculate the flexibility according to Vihinen, 1994.
No argument to change window size because parameters are specific for a
window=9. The parameters used are optimized for determining the flexibility.
"""
flexibilities = ProtParamData.Flex
window_size = 9
weights = [0.25, 0.4375, 0.625, 0.8125, 1]
scores = []
for i in range(self.length - window_size):
subsequence = self.sequence[i:i+window_size]
score = 0.0
for j in range(window_size // 2):
front = subsequence[j]
back = subsequence[window_size - j - 1]
score += (flexibilities[front] + flexibilities[back]) * weights[j]
middle = subsequence[window_size // 2 + 1]
score += flexibilities[middle]
scores.append(score / 5.25)
return scores
def gravy(self):
"""Calculate the gravy according to Kyte and Doolittle."""
total_gravy = sum(ProtParamData.kd[aa] for aa in self.sequence)
return total_gravy / self.length
def _weight_list(self, window, edge):
"""Makes a list of relative weight of the
window edges compared to the window center. The weights are linear.
it actually generates half a list. For a window of size 9 and edge 0.4
you get a list of [0.4, 0.55, 0.7, 0.85].
"""
unit = 2 * (1.0 - edge) / (window - 1)
weights = [0.0] * (window // 2)
for i in range(window // 2):
weights[i] = edge + unit * i
return weights
def protein_scale(self, param_dict, window, edge=1.0):
"""Compute a profile by any amino acid scale.
An amino acid scale is defined by a numerical value assigned to each type of
amino acid. The most frequently used scales are the hydrophobicity or
hydrophilicity scales and the secondary structure conformational parameters
scales, but many other scales exist which are based on different chemical and
physical properties of the amino acids. You can set several parameters that
control the computation of a scale profile, such as the window size and the
window edge relative weight value.
WindowSize: The window size is the length
of the interval to use for the profile computation. For a window size n, we
use the i-(n-1)/2 neighboring residues on each side to compute
the score for residue i. The score for residue i is the sum of the scaled values
for these amino acids, optionally weighted according to their position in the
window.
Edge: The central amino acid of the window always has a weight of 1.
By default, the amino acids at the remaining window positions have the same
weight, but you can make the residue at the center of the window have a
larger weight than the others by setting the edge value for the residues at
the beginning and end of the interval to a value between 0 and 1. For
instance, for Edge=0.4 and a window size of 5 the weights will be: 0.4, 0.7,
1.0, 0.7, 0.4.
The method returns a list of values which can be plotted to
view the change along a protein sequence. Many scales exist. Just add your
favorites to the ProtParamData modules.
Similar to expasy's ProtScale: http://www.expasy.org/cgi-bin/protscale.pl
"""
# generate the weights
# _weight_list returns only one tail. If the list should be [0.4,0.7,1.0,0.7,0.4]
# what you actually get from _weights_list is [0.4,0.7]. The correct calculation is done
# in the loop.
weights = self._weight_list(window, edge)
scores = []
# the score in each Window is divided by the sum of weights
# (* 2 + 1) since the weight list is one sided:
sum_of_weights = sum(weights) * 2 + 1
for i in range(self.length - window + 1):
subsequence = self.sequence[i:i+window]
score = 0.0
for j in range(window // 2):
# walk from the outside of the Window towards the middle.
# Iddo: try/except clauses added to avoid raising an exception on a non-standard amino acid
try:
front = param_dict[subsequence[j]]
back = param_dict[subsequence[window - j - 1]]
score += weights[j] * front + weights[j] * back
except KeyError:
sys.stderr.write('warning: %s or %s is not a standard amino acid.\n' %
(subsequence[j], subsequence[window - j - 1]))
# Now add the middle value, which always has a weight of 1.
middle = subsequence[window // 2]
if middle in param_dict:
score += param_dict[middle]
else:
sys.stderr.write('warning: %s is not a standard amino acid.\n' % (middle))
scores.append(score / sum_of_weights)
return scores
def isoelectric_point(self):
"""Calculate the isoelectric point.
Uses the module IsoelectricPoint to calculate the pI of a protein.
"""
aa_content = self.count_amino_acids()
ie_point = IsoelectricPoint.IsoelectricPoint(self.sequence, aa_content)
return ie_point.pi()
def secondary_structure_fraction (self):
"""Calculate fraction of helix, turn and sheet.
Returns a list of the fraction of amino acids which tend
to be in Helix, Turn or Sheet.
Amino acids in helix: V, I, Y, F, W, L.
Amino acids in Turn: N, P, G, S.
Amino acids in sheet: E, M, A, L.
Returns a tuple of three integers (Helix, Turn, Sheet).
"""
aa_percentages = self.get_amino_acids_percent()
helix = sum([aa_percentages[r] for r in 'VIYFWL'])
turn = sum([aa_percentages[r] for r in 'NPGS'])
sheet = sum([aa_percentages[r] for r in 'EMAL'])
return helix, turn, sheet
|
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# generate_gaze_data.py
"""
Generate specific idealized gaze patterns for several subjects to use them
as benchmark for the different coherence algorithms.
All functions return a sequence of n elements. Each element is a nx3
np.array containing the (x, y, t) data points for one subject.
Time is given in micro seconds (us).
"""
import pickle
import numpy as np
SCREEN_RES = (1280, 720)
SAMPLING_RATE = 50 #Hz
DELTA_T = 1./float(SAMPLING_RATE) * 1000000
SD_NOISE = 5
def random_coherent_random(n, dt, t1, t2):
"""
generate random coherent random data.
Parameters
----------
n : integer
number of subjects
dt : float
time intervall in micro seconds
t1 : float
point in time where coherent phase begins
t2 : float
point in time where coherent phase ends
Returns
-------
subjects : sequence of np.arrays
"""
subjects = list()
time_steps = int(dt/DELTA_T)
x_coherent = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_coherent = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
for i in range(n):
subject = list()
state = None
for j in range(time_steps):
t = j * DELTA_T
x_noise = np.random.normal(0, SD_NOISE)
y_noise = np.random.normal(0, SD_NOISE)
# random
if t < t1:
if not state == "random1":
state = "random1"
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
elif t < t2:
if not state == "coherent":
state = "coherent"
x = x_coherent + x_noise
y = y_coherent + y_noise
else:
if not state == "random2":
state = "random2"
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
subject.append((x, y, t))
subject = np.array(subject)
subjects.append(subject)
return subjects
def random_intervals(n, dt, ts):
"""
generate random coherent random data.
Parameters
----------
n : integer
number of subjects
dt : float
time intervall in micro seconds
ts : list of float
points in time where random phase switches
Returns
-------
subjects : sequence of np.arrays
"""
subjects = list()
time_steps = int(dt/DELTA_T)
for i in range(n):
subject = list()
state = None
for j in range(time_steps):
t = j * DELTA_T
x_noise = np.random.normal(0, SD_NOISE)
y_noise = np.random.normal(0, SD_NOISE)
for t_int in ts:
if t < t_int:
if not state == t_int:
state = t_int
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
break
if t > ts[-1]:
if not state == "last":
state = "last"
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
subject.append((x, y, t))
subject = np.array(subject)
subjects.append(subject)
return subjects
def find_the_needle(n, dt_interval, n_before, n_after):
"""
generate random coherent random data.
The total time interval is equal to dt_interval*(n_before+n_after+1).
Parameters
----------
n : integer
number of subjects
dt_interval : float
length of single interval in micro seconds
n_before : integer
number of intervals before the coherent interval
n_after : integer
number of intervals after the coherent interval
Returns
-------
subjects : sequence of np.arrays
"""
subjects = list()
dt = dt_interval * (n_before + 1 + n_after)
print("dt: %f" % dt)
time_steps = int(dt/DELTA_T)
x_coherent = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_coherent = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
for i in range(n):
subject = list()
state = None
for j in range(time_steps):
t = j * DELTA_T
x_noise = np.random.normal(0, SD_NOISE)
y_noise = np.random.normal(0, SD_NOISE)
for k in range(n_before):
t_int = (k+1) * dt_interval
if t < t_int:
if not state == t_int:
state = t_int
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
break
if n_before*dt_interval < t < (n_before+1)*dt_interval:
if not state == "coherent":
state = "coherent"
x = x_coherent + x_noise
y = y_coherent + y_noise
if t > (n_before+1)*dt_interval:
for k in range(n_after):
t_int = (n_before + 1 + k+1) * dt_interval
if t < t_int:
if not state == t_int:
state = t_int
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
break
if t > (n_before+1+n_after)*dt_interval:
if not state == "last":
state = "last"
x_state = np.random.normal(SCREEN_RES[0]/2., SCREEN_RES[0]/6.)
y_state = np.random.normal(SCREEN_RES[1]/2., SCREEN_RES[1]/6.)
x = x_state + x_noise
y = y_state + y_noise
subject.append((x, y, t))
subject = np.array(subject)
subjects.append(subject)
return subjects
if __name__ == "__main__":
subjects = find_the_needle(20, 300000, 50, 50)
population = np.concatenate(subjects, axis=0)
pickle_file = "find_the_needle.pickle"
with open(pickle_file, "wb") as pfile:
obj = (subjects, population)
pickle.dump(obj, pfile)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class BatchNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm(self):
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_correctness(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float32')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_batchnorm_mixed_precision(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float16')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float16')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_policy(self):
norm = keras.layers.BatchNormalization(
axis=-1,
input_shape=(4, 4, 3),
momentum=0.8,
dtype=policy.Policy('mixed_float16'))
x = np.random.normal(size=(10, 4, 4, 3))
y = norm(x)
self.assertEqual(y.dtype, 'float16')
self.assertEqual(norm.beta.dtype.base_dtype, 'float32')
self.assertEqual(norm.gamma.dtype.base_dtype, 'float32')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_non_trainable_with_fit(self):
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.random.random((100, 3)), np.random.random((100, 3)))
test_data = np.random.random((10, 3))
test_targets = np.random.random((10, 3))
test_loss = model.evaluate(test_data, test_targets)
bn.trainable = False
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
train_loss = model.train_on_batch(test_data, test_targets)
self.assertAlmostEqual(test_loss, train_loss)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_non_trainable_with_tf_function(self):
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(inputs, outputs)
loss_fn = keras.losses.MeanSquaredError()
optimizer = rmsprop_v2.RMSprop()
@def_function.function()
def train_step(x, y):
with backprop.GradientTape() as tape:
y_pred = model(x, training=True)
loss = loss_fn(y, y_pred)
grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
return loss
@def_function.function()
def test_step(x, y):
y_pred = model(x, training=False)
loss = loss_fn(y, y_pred)
return loss
train_step(np.random.random((100, 3)), np.random.random((100, 3)))
test_data = np.random.random((10, 3))
test_targets = np.random.random((10, 3))
test_loss = test_step(test_data, test_targets)
bn.trainable = False
train_loss = train_step(test_data, test_targets)
if context.executing_eagerly():
self.assertAlmostEqual(test_loss.numpy(), train_loss.numpy())
def test_eager_batchnorm_in_custom_model_call_with_tf_function(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.bn = keras.layers.BatchNormalization()
@def_function.function()
def call(self, x, training):
return self.bn(x, training=training)
with context.eager_mode():
model = MyModel()
for _ in range(10):
x = constant_op.constant(0.5, shape=[1, 1])
model(x, training=True)
# Make sure the moving mean and variance have been updated
self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3)
self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
class BatchNormalizationV1Test(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_v1_fused_attribute(self):
norm = normalization.BatchNormalization()
inp = keras.layers.Input((4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = normalization.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(2, 2, 2))
norm(inp)
self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm_v2(self):
testing_utils.layer_test(
normalization_v2.BatchNormalization,
kwargs={'fused': True},
input_shape=(3, 3, 3, 3))
testing_utils.layer_test(
normalization_v2.BatchNormalization,
kwargs={'fused': None},
input_shape=(3, 3, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_v2_fused_attribute(self):
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
inp = keras.layers.Input(shape=(4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(fused=True, axis=[3])
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
with self.assertRaisesRegexp(ValueError, 'fused.*renorm'):
normalization_v2.BatchNormalization(fused=True, renorm=True)
with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
normalization_v2.BatchNormalization(fused=True, axis=2)
with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
normalization_v2.BatchNormalization(fused=True, axis=[1, 3])
with self.assertRaisesRegexp(ValueError, 'fused.*virtual_batch_size'):
normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2)
with self.assertRaisesRegexp(ValueError, 'fused.*adjustment'):
normalization_v2.BatchNormalization(fused=True,
adjustment=lambda _: (1, 0))
norm = normalization_v2.BatchNormalization(fused=True)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4))
with self.assertRaisesRegexp(ValueError, '4D input tensors'):
norm(inp)
def test_updates_in_wrap_function(self):
with context.eager_mode():
layer = keras.layers.BatchNormalization()
def my_func():
x = array_ops.ones((10, 1))
return layer(x, training=True)
wrapped_fn = wrap_function.wrap_function(my_func, [])
wrapped_fn()
# Updates should be tracked in a `wrap_function`.
self.assertLen(layer.updates, 2)
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
model = keras.models.Sequential()
model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
norm = layer(momentum=0.8, fused=fused)
model.add(norm)
if dtype == 'float16':
# Keras models require float32 losses.
model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
.astype(dtype))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=2e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=2e-1)
@parameterized.parameters(
[normalization.BatchNormalization, normalization_v2.BatchNormalization])
class NormalizationLayersGraphModeOnlyTest(
test.TestCase, parameterized.TestCase):
def test_shared_batchnorm(self, layer):
"""Test that a BN layer can be shared across different data streams."""
with self.cached_session():
# Test single layer reuse
bn = layer()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
model.train_on_batch(x, x)
self.assertLen(bn.updates, 4)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name='new_model')
self.assertLen(new_model.updates, 6)
self.assertLen(model.updates, 6)
new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self, layer):
with self.cached_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = layer(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
assert not model.updates
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_batchnorm_trainable(self, layer):
"""Tests that batchnorm layer is trainable when learning phase is enabled.
Computes mean and std for current inputs then
applies batch normalization using them.
Args:
layer: Either V1 or V2 of BatchNormalization layer.
"""
# TODO(fchollet): enable in all execution modes when issue with
# learning phase setting is resolved.
with ops.Graph().as_default(), self.cached_session():
bn_mean = 0.5
bn_std = 10.
val_a = np.expand_dims(np.arange(10.), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = layer()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights([
np.array([1.]),
np.array([0.]),
np.array([bn_mean]),
np.array([bn_std**2])
])
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
with keras.backend.learning_phase_scope(1):
model = get_model(bn_mean, bn_std)
model.compile(loss='mse', optimizer='rmsprop')
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
def _run_layernorm_correctness_test(layer, dtype='float32'):
model = keras.models.Sequential()
model.add(keras.layers.Lambda(lambda x: math_ops.cast(x, dtype='float16')))
norm = layer(input_shape=(2, 2, 2), dtype=dtype)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
.astype(dtype))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class LayerNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_layernorm(self):
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': (-3, -2, -1)},
input_shape=(2, 8, 8, 3))
@keras_parameterized.run_all_keras_modes
def test_non_fused_layernorm(self):
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': -2},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': (-3, -2)},
input_shape=(2, 8, 8, 3))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': (-3, -1)},
input_shape=(2, 8, 8, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_layernorm_weights(self):
layer = keras.layers.LayerNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 0)
layer = keras.layers.LayerNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 2)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_layernorm_regularization(self):
layer = keras.layers.LayerNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.LayerNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_layernorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_layernorm_correctness(self):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_layernorm_mixed_precision(self):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float16')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testIncorrectAxisType(self):
with self.assertRaisesRegexp(
TypeError, r'Expected an int or a list/tuple of ints'):
_ = normalization.LayerNormalization(axis={'axis': -1})
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidAxis(self):
with self.assertRaisesRegexp(ValueError, r'Invalid axis: 3'):
layer_norm = normalization.LayerNormalization(axis=3)
layer_norm.build(input_shape=(2, 2, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDuplicateAxis(self):
with self.assertRaisesRegexp(ValueError, r'Duplicate axis:'):
layer_norm = normalization.LayerNormalization(axis=[-1, -1])
layer_norm.build(input_shape=(2, 2, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testFusedAttr(self):
layer_norm = normalization.LayerNormalization(axis=[-2, -1])
layer_norm.build(input_shape=(2, 2, 2))
self.assertEqual(layer_norm._fused, True)
class LayerNormalizationNumericsTest(keras_parameterized.TestCase):
"""Tests LayerNormalization has correct and numerically stable outputs."""
def _expected_layer_norm(self, x, beta, gamma, batch_input_shape, axis,
epsilon):
"""Returns the layer norm, which is computed using NumPy."""
broadcast_shape = [batch_input_shape[i] if i in axis else 1
for i in range(len(batch_input_shape))]
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
expected = (x - mean) / np.sqrt(var + epsilon)
expected *= np.reshape(gamma, broadcast_shape)
expected += np.reshape(beta, broadcast_shape)
return expected
def _test_forward_pass(self, batch_input_shape, axis, fp64_tol=1e-14,
fp32_tol=1e-6, fp16_tol=1e-2):
"""Tests the forward pass of layer normalization.
Args:
batch_input_shape: The input shape that will be used to test, including
the batch dimension.
axis: A list of axises to normalize. Will be passed to the `axis` argument
of LayerNormalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
fp16_tol: The relative and absolute tolerance for float16.
"""
param_shape = [batch_input_shape[i] for i in axis]
param_elems = 1
for dim in param_shape:
param_elems *= dim
beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
x = np.random.normal(size=batch_input_shape)
for epsilon in 1e-12, 1e-3:
expected = self._expected_layer_norm(x, beta, gamma, batch_input_shape,
axis, epsilon)
for dtype in 'float64', 'float32', 'float16':
norm = normalization.LayerNormalization(
axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
gamma_initializer=keras.initializers.constant(gamma))
y = norm(keras.backend.cast(x, dtype))
actual = keras.backend.eval(y)
if dtype == 'float64':
tol = fp64_tol
elif dtype == 'float32':
tol = fp32_tol
else:
assert dtype == 'float16'
tol = fp16_tol
# We use absolute tolerances in addition to relative tolerances, because
# some of the values are very close to zero.
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_forward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least 4
# elements.
self._test_forward_pass((4, 3), (0,))
self._test_forward_pass((3, 4), (1,))
self._test_forward_pass((4, 3, 2), (0,))
self._test_forward_pass((2, 4, 2), (1,))
self._test_forward_pass((2, 3, 4), (2,), fp16_tol=5e-2)
self._test_forward_pass((2, 3, 2), (0, 2))
self._test_forward_pass((2, 2, 2, 2), (1, 3))
self._test_forward_pass((2, 2, 2, 2), (2, 3))
self._test_forward_pass((2, 3, 4, 5), (3,))
def _test_backward_pass(self, batch_input_shape, axis, fp64_tol=1e-5,
fp32_tol=1e-5, fp16_tol=2e-2):
"""Tests the backwards pass of layer normalization.
Args:
batch_input_shape: The input shape that will be used to test, including
the batch dimension.
axis: A list of axises to normalize. Will be passed to the `axis` argument
of LayerNormalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
fp16_tol: The relative and absolute tolerance for float16.
"""
param_shape = [batch_input_shape[i] for i in axis]
param_elems = 1
for dim in param_shape:
param_elems *= dim
beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
x = np.random.normal(size=batch_input_shape)
for epsilon in 1e-12, 1e-3:
# Float64 must come first in this list, as we use the float64 numerical
# gradients to compare to the float32 and float16 symbolic gradients as
# well. Computing float32/float16 numerical gradients is too numerically
# unstable.
for dtype in 'float64', 'float32', 'float16':
norm = normalization.LayerNormalization(
axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
gamma_initializer=keras.initializers.constant(gamma))
norm.build(x.shape)
# pylint: disable=cell-var-from-loop
def forward_fn(x, beta, gamma):
# We must monkey-patch the attributes of `norm` with the function
# arguments, so that the gradient checker will properly compute their
# gradients. The gradient checker computes gradients with respect to
# the input arguments of `f`.
with test.mock.patch.object(norm, 'beta', beta):
with test.mock.patch.object(norm, 'gamma', gamma):
return norm(x)
# pylint: enable=cell-var-from-loop
results = gradient_checker_v2.compute_gradient(
forward_fn, [keras.backend.cast(x, dtype), norm.beta, norm.gamma])
([x_grad_t, beta_grad_t, gamma_grad_t],
[x_grad_n, beta_grad_n, gamma_grad_n]) = results
if dtype == 'float64':
# We use the float64 numeric gradients as the reference, to compare
# against the symbolic gradients for all dtypes.
x_grad_ref = x_grad_n
beta_grad_ref = beta_grad_n
gamma_grad_ref = gamma_grad_n
tol = fp64_tol
elif dtype == 'float32':
tol = fp32_tol
else:
assert dtype == 'float16'
tol = fp16_tol
# We use absolute tolerances in addition to relative tolerances, because
# some of the values are very close to zero.
self.assertAllClose(x_grad_t, x_grad_ref, rtol=tol, atol=tol)
self.assertAllClose(beta_grad_t, beta_grad_ref, rtol=tol, atol=tol)
self.assertAllClose(gamma_grad_t, gamma_grad_ref, rtol=tol, atol=tol)
# The gradient_checker_v2 does not work properly with LayerNorm in graph mode.
@tf_test_util.run_v2_only
def test_backward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least 4
# elements.
self._test_backward_pass((4, 3), (0,))
self._test_backward_pass((2, 4, 2), (1,))
self._test_backward_pass((2, 3, 4), (2,))
self._test_backward_pass((2, 3, 2), (0, 2), fp64_tol=5e-4, fp32_tol=5e-4)
self._test_backward_pass((2, 2, 2, 2), (1, 3))
self._test_backward_pass((2, 2, 2, 2), (2, 3))
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import inspect
import collections
import sys
from importlib import import_module
from gevent.event import AsyncResult
import gevent
from .util.decorator import EasyDecorator
from .irc import Message
if sys.version_info.major == 2:
str = unicode # noqa
__all__ = ['component_class',
'msg_parser',
'watches', 'observe', 'observes', 'handle', 'handles',
'every',
'triggers_on', 'keyword', 'keywords', 'trigger', 'triggers',
'ComponentManager']
#Used to mark classes for later inspection
CLASS_MARKER = '_PYAIB_COMPONENT'
def component_class(cls):
"""
Let the component loader know to load this class
If they pass a string argument to the decorator use it as a context
name for the instance
"""
if isinstance(cls, str):
context = cls
def wrapper(cls):
setattr(cls, CLASS_MARKER, context)
return cls
return wrapper
elif inspect.isclass(cls):
setattr(cls, CLASS_MARKER, True)
return cls
def _requires(*names):
def wrapper(cls):
cls.__requires__ = names
return cls
return wrapper
component_class.requires = _requires
def _get_plugs(method, kind):
""" Setup a place to put plugin hooks, allowing only one type per func """
if not hasattr(method, '__plugs__'):
method.__plugs__ = (kind, [])
elif method.__plugs__[0] != kind:
raise RuntimeError('Multiple Hook Types on a single method (%s)' %
method.__name__)
return method.__plugs__[1]
def msg_parser(*kinds, **kwargs):
"""
Defines that this method is a message type parser
@param kinds: List of IRC message types/numerics
@param kwargs: Accepts chain keyword, True or 'after' executes this after
the existing parser. 'before' execute before existing parsers.
default is to replace the existing parser
"""
chain = kwargs.pop('chain', False)
def wrapper(func):
parsers = _get_plugs(func, 'parsers')
parsers.extend([(kind, chain) for kind in kinds])
return func
return wrapper
def watches(*events):
""" Define a series of events to later be subscribed to """
def wrapper(func):
eplugs = _get_plugs(func, 'events')
eplugs.extend([event for event in events if event not in eplugs])
return func
return wrapper
observes = watches
observe = watches
handle = watches
handles = watches
class _Ignore(EasyDecorator):
"""Only pass if triggers is from user not ignored"""
def wrapper(dec, irc_c, msg, *args):
if dec.args and dec.kwargs.get('runtime'):
for attr in dec.args:
if hasattr(dec._instance, attr):
ignore_nicks = getattr(dec._instance, attr)
if isinstance(ignore_nicks, str)\
and msg.sender.nick == ignore_nicks:
return
elif isinstance(ignore_nicks, collections.Container)\
and msg.sender.nick in ignore_nicks:
return
elif dec.args and msg.sender.nick in dec.args:
return
return dec.call(irc_c, msg, *args)
watches.ignore = _Ignore
class _Channel(EasyDecorator):
"""Ignore triggers not in channels, or optionally a list of channels"""
def wrapper(dec, irc_c, msg, *args):
if msg.channel:
#Did they want to restrict which channels
#Should we lookup allowed channels at run time
if dec.args and dec.kwargs.get('runtime'):
ok = False
for attr in dec.args:
if hasattr(dec._instance, attr):
channel = getattr(dec._instance, attr)
if isinstance(channel, str)\
and msg.channel == channel:
ok = True
elif isinstance(channel, collections.Container)\
and msg.channel in channel:
ok = True
if not ok:
return
elif dec.args and msg.channel not in dec.args:
return
return dec.call(irc_c, msg, *args)
watches.channel = _Channel
def every(seconds, name=None):
""" Define a timer to execute every interval """
def wrapper(func):
timers = _get_plugs(func, 'timers')
timer = (name if name else func.__name__, seconds)
if timer not in timers:
timers.append(timer)
return func
return wrapper
class triggers_on(object):
"""Define a series of trigger words this method responds too"""
def __init__(self, *words):
self.words = words
def __call__(self, func):
triggers = _get_plugs(func, 'triggers')
triggers.extend(set([word for word in self.words
if word not in triggers]))
return func
class channel(EasyDecorator):
"""Ignore triggers not in channels, or optionally a list of channels"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if msg.channel:
# Did they want to restrict which channels
# Should we lookup allowed channels at run time
if dec.args and dec.kwargs.get('runtime'):
ok = False
for attr in dec.args:
if hasattr(dec._instance, attr):
channel = getattr(dec._instance, attr)
if isinstance(channel, str)\
and msg.channel.lower() == channel:
ok = True
elif isinstance(channel, collections.Container)\
and msg.channel.lower() in channel:
ok = True
if not ok:
return
elif dec.args and msg.channel not in dec.args:
return
elif not dec.kwargs.get('private'):
return
return dec.call(irc_c, msg, trigger, args, kargs)
class private_or_channel(channel):
"""Allow either private or specified channel"""
def __init__(dec, *args, **kwargs):
kwargs['private'] = True
super(triggers_on.private_or_channel, dec).__init__(*args, **kwargs)
class private(EasyDecorator):
"""Only pass if triggers is from message not in a channel"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if not msg.channel:
return dec.call(irc_c, msg, trigger, args, kargs)
class helponly(EasyDecorator):
"""Only provide help"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
msg.reply('%s %s' % (trigger,
irc_c.triggers._clean_doc(dec.__doc__)))
class autohelp(EasyDecorator):
"""Make --help trigger help"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if 'help' in kargs or (args and args[0] == 'help'):
msg.reply('%s %s' % (trigger,
irc_c.triggers._clean_doc(dec.__doc__)))
else:
dec.call(irc_c, msg, trigger, args, kargs)
class autohelp_noargs(EasyDecorator):
"""Empty args / kargs trigger help"""
#It was impossible to call autohelp to decorate this method
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if (not args and not kargs) or 'help' in kargs or (
args and args[0] == 'help'):
msg.reply('%s %s' % (trigger,
irc_c.triggers._clean_doc(dec.__doc__)))
else:
return dec.call(irc_c, msg, trigger, args, kargs)
class sub(EasyDecorator):
"""Handle only sub(words) for a given trigger"""
def __init__(dec, *words):
dec._subs = words
for word in words:
if not isinstance(word, str):
raise TypeError("sub word must be a string")
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if args and args[0].lower() in dec._subs:
unparsed = msg.unparsed
msg = msg.copy(irc_c)
msg.unparsed = unparsed[len(args[0]) + 1:]
return dec.call(irc_c, msg, '%s %s' % (trigger,
args[0].lower()),
args[1:], kargs)
subs = sub
class nosub(EasyDecorator):
"""Prevent call if argument is present"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if (not dec.args and args) or (dec.args and args
and args[0].lower() in dec.args):
return
else:
return dec.call(irc_c, msg, trigger, args, kargs)
nosubs = nosub
keyword = keywords = trigger = triggers = triggers_on
triggers.ignore = _Ignore
triggers.channel = _Channel
class ComponentManager(object):
""" Manage and Load all pyaib Components """
_loaded_components = collections.defaultdict(AsyncResult)
def __init__(self, context, config):
""" Needs a irc context and its config """
self.context = context
self.config = config
def load(self, name):
""" Load a python module as a component """
if self.is_loaded(name):
return
#Load top level config item matching component name
basename = name.split('.').pop()
config = self.context.config.setdefault(basename, {})
print("Loading Component %s..." % name)
ns = self._process_component(name, 'pyaib', CLASS_MARKER,
self.context, config)
self._loaded_components[basename].set(ns)
def _require(self, name):
self._loaded_components[name].wait()
def load_configured(self, autoload=None):
"""
Load all configured components autoload is a list of components
to always load
"""
components = []
if isinstance(autoload, (list, tuple, set)):
components.extend(autoload)
#Don't do duplicate loads
if self.config.load:
if not isinstance(self.config.load, list):
self.config.load = self.config.load.split(' ')
[components.append(comp) for comp in self.config.load
if comp not in components]
gevent.joinall([gevent.spawn(self.load, component)
for component in components])
def is_loaded(self, name):
""" Determine by name if a component is loaded """
return self._loaded_components[name].ready()
def _install_hooks(self, context, hooked_methods):
#Add All the hooks to the right place
for method in hooked_methods:
kind, args = method.__plugs__
if kind == 'events':
for event in args:
context.events(event).observe(method)
elif kind == 'triggers':
for word in args:
context.triggers(word).observe(method)
elif kind == 'timers':
for name, seconds in args:
context.timers.set(name, method, every=seconds)
elif kind == 'parsers':
for name, chain in args:
self._add_parsers(method, name, chain)
def _add_parsers(self, method, name, chain):
""" Handle Message parser adding and chaining """
if chain:
existing = Message.get_parser(name)
def _chain_after(msg, irc_c):
existing(msg, irc_c)
method(msg, irc_c)
def _chain_before(msg, irc_c):
method(msg, irc_c)
existing(msg, irc_c)
if existing and chain == 'before':
Message.add_parser(name, _chain_before)
elif existing:
Message.add_parser(name, _chain_after)
else:
Message.add_parser(name, method)
else:
Message.add_parser(name, method)
def _find_annotated_callables(self, class_marker, component_ns, config,
context):
annotated_callables = []
for name, member in inspect.getmembers(component_ns):
#Find Classes marked for loading
if inspect.isclass(member) and hasattr(member, class_marker):
#Handle Requirements
if hasattr(member, '__requires__'):
for req in member.__requires__:
self._require(req)
obj = member(context, config)
#Save the context for this obj if the class_marker is a str
context_name = getattr(obj, class_marker)
if isinstance(context_name, str):
context[context_name] = obj
#Search for hooked instance methods
for name, thing in inspect.getmembers(obj):
if (isinstance(thing, collections.Callable)
and hasattr(thing, '__plugs__')):
annotated_callables.append(thing)
#Find Functions with Hooks
if (isinstance(member, collections.Callable)
and hasattr(member, '__plugs__')):
annotated_callables.append(member)
return annotated_callables
def _process_component(self, name, path, class_marker, context, config):
if name.startswith('/'):
importname = name[1:]
path = None
else:
importname = '.'.join([path, name])
try:
component_ns = import_module(importname)
except ImportError as e:
raise ImportError('pyaib failed to load (%s): %r'
% (importname, e))
annotated_calls = self._find_annotated_callables(class_marker,
component_ns, config,
context)
self._install_hooks(context, annotated_calls)
return component_ns
|
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
from oslo_utils import uuidutils
from saharaclient.api import base
def get_resource(manager, name_or_id, **kwargs):
if uuidutils.is_uuid_like(name_or_id):
return manager.get(name_or_id, **kwargs)
else:
resource = manager.find_unique(name=name_or_id)
if kwargs:
# we really need additional call to apply kwargs
resource = manager.get(resource.id, **kwargs)
return resource
def created_at_sorted(objs, reverse=False):
return sorted(objs, key=created_at_key, reverse=reverse)
def random_name(prefix=None):
return "%s-%s" % (prefix, uuidutils.generate_uuid()[:8])
def created_at_key(obj):
return timeutils.parse_isotime(obj["created_at"])
def get_resource_id(manager, name_or_id):
if uuidutils.is_uuid_like(name_or_id):
return name_or_id
else:
return manager.find_unique(name=name_or_id).id
def create_dict_from_kwargs(**kwargs):
return {k: v for (k, v) in kwargs.items() if v is not None}
def prepare_data(data, fields):
new_data = {}
for f in fields:
if f in data:
new_data[f.replace('_', ' ').capitalize()] = data[f]
return new_data
def unzip(data):
return zip(*data)
def extend_columns(columns, items):
return unzip(list(unzip(columns)) + [('', '')] + items)
def prepare_column_headers(columns, remap=None):
remap = remap if remap else {}
new_columns = []
for c in columns:
for old, new in remap.items():
c = c.replace(old, new)
new_columns.append(c.replace('_', ' ').capitalize())
return new_columns
def get_by_name_substring(data, name):
return [obj for obj in data if name in obj.name]
def wait_for_delete(manager, obj_id, sleep_time=5, timeout=3000):
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
try:
manager.get(obj_id)
except base.APIException as ex:
if ex.error_code == 404:
return True
raise
time.sleep(sleep_time)
return False
def get_api_version(app):
return app.api_version['data_processing']
def is_api_v2(app):
if get_api_version(app) == '2':
return True
return False
def _cluster_templates_configure_ng(app, node_groups, client):
node_groups_list = dict(
map(lambda x: x.split(':', 1), node_groups))
node_groups = []
plugins_versions = set()
for name, count in node_groups_list.items():
ng = get_resource(client.node_group_templates, name)
node_groups.append({'name': ng.name,
'count': int(count),
'node_group_template_id': ng.id})
if is_api_v2(app):
plugins_versions.add((ng.plugin_name, ng.plugin_version))
else:
plugins_versions.add((ng.plugin_name, ng.hadoop_version))
if len(plugins_versions) != 1:
raise exceptions.CommandError('Node groups with the same plugins '
'and versions must be specified')
plugin, plugin_version = plugins_versions.pop()
return plugin, plugin_version, node_groups
def _get_plugin_version(app, cluster_template, client):
ct = get_resource(client.cluster_templates, cluster_template)
if is_api_v2(app):
return ct.plugin_name, ct.plugin_version, ct.id
else:
return ct.plugin_name, ct.hadoop_version, ct.id
def create_job_templates(app, client, mains_ids, libs_ids, parsed_args):
args_dict = dict(name=parsed_args.name,
type=parsed_args.type,
mains=mains_ids,
libs=libs_ids,
description=parsed_args.description,
interface=parsed_args.interface,
is_public=parsed_args.public,
is_protected=parsed_args.protected)
if is_api_v2(app):
data = client.job_templates.create(**args_dict).to_dict()
else:
data = client.jobs.create(**args_dict).to_dict()
return data
def create_job_template_json(app, client, **template):
if is_api_v2(app):
data = client.job_templates.create(**template).to_dict()
else:
data = client.jobs.create(**template).to_dict()
return data
def list_job_templates(app, client, search_opts):
if is_api_v2(app):
data = client.job_templates.list(search_opts=search_opts)
else:
data = client.jobs.list(search_opts=search_opts)
return data
def get_job_templates_resources(app, client, parsed_args):
if is_api_v2(app):
data = get_resource(
client.job_templates, parsed_args.job_template).to_dict()
else:
data = get_resource(
client.jobs, parsed_args.job_template).to_dict()
return data
def delete_job_templates(app, client, jt):
if is_api_v2(app):
jt_id = get_resource_id(client.job_templates, jt)
client.job_templates.delete(jt_id)
else:
jt_id = get_resource_id(client.jobs, jt)
client.jobs.delete(jt_id)
def get_job_template_id(app, client, parsed_args):
if is_api_v2(app):
jt_id = get_resource_id(
client.job_templates, parsed_args.job_template)
else:
jt_id = get_resource_id(
client.jobs, parsed_args.job_template)
return jt_id
def update_job_templates(app, client, jt_id, update_data):
if is_api_v2(app):
data = client.job_templates.update(jt_id, **update_data).job_template
else:
data = client.jobs.update(jt_id, **update_data).job
return data
def create_cluster_template(app, client, plugin, plugin_version,
parsed_args, configs, shares, node_groups):
args_dict = dict(
name=parsed_args.name,
plugin_name=plugin,
description=parsed_args.description,
node_groups=node_groups,
use_autoconfig=parsed_args.autoconfig,
cluster_configs=configs,
shares=shares,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
domain_name=parsed_args.domain_name)
if is_api_v2(app):
args_dict['plugin_version'] = plugin_version
else:
args_dict['hadoop_version'] = plugin_version
data = client.cluster_templates.create(**args_dict).to_dict()
return data
def update_cluster_template(app, client, plugin, plugin_version,
parsed_args, configs, shares, node_groups, ct_id):
args_dict = dict(
name=parsed_args.name,
plugin_name=plugin,
description=parsed_args.description,
node_groups=node_groups,
use_autoconfig=parsed_args.use_autoconfig,
cluster_configs=configs,
shares=shares,
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected,
domain_name=parsed_args.domain_name
)
if is_api_v2(app):
args_dict['plugin_version'] = plugin_version
else:
args_dict['hadoop_version'] = plugin_version
update_dict = create_dict_from_kwargs(**args_dict)
data = client.cluster_templates.update(
ct_id, **update_dict).to_dict()
return data
def create_cluster(client, app, parsed_args, plugin, plugin_version,
template_id, image_id, net_id):
args = dict(
name=parsed_args.name,
plugin_name=plugin,
cluster_template_id=template_id,
default_image_id=image_id,
description=parsed_args.description,
is_transient=parsed_args.transient,
user_keypair_id=parsed_args.user_keypair,
net_id=net_id,
count=parsed_args.count,
is_public=parsed_args.public,
is_protected=parsed_args.protected)
if is_api_v2(app):
args['plugin_version'] = plugin_version
else:
args['hadoop_version'] = plugin_version
data = client.clusters.create(**args).to_dict()
return data
def create_job(client, app, jt_id, cluster_id, input_id, output_id,
job_configs, parsed_args):
args_dict = dict(cluster_id=cluster_id,
input_id=input_id,
output_id=output_id,
interface=parsed_args.interface,
configs=job_configs,
is_public=parsed_args.public,
is_protected=parsed_args.protected)
if is_api_v2(app):
args_dict['job_template_id'] = jt_id
data = client.jobs.create(**args_dict).to_dict()
else:
args_dict['job_id'] = jt_id
data = client.job_executions.create(**args_dict).to_dict()
return data
def create_job_json(client, app, **template):
if is_api_v2(app):
data = client.jobs.create(**template).to_dict()
else:
data = client.job_executions.create(**template).to_dict()
return data
def update_job(client, app, parsed_args, update_dict):
if is_api_v2(app):
data = client.jobs.update(
parsed_args.job, **update_dict).job
else:
data = client.job_executions.update(
parsed_args.job, **update_dict).job_execution
return data
def create_node_group_templates(client, app, parsed_args, flavor_id, configs,
shares):
if app.api_version['data_processing'] == '2':
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
plugin_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix,
boot_from_volume=parsed_args.boot_from_volume,
boot_volume_type=parsed_args.boot_volume_type,
boot_volume_availability_zone=(
parsed_args.boot_volume_availability_zone),
boot_volume_local_to_instance=(
parsed_args.boot_volume_local_to_instance)
).to_dict()
else:
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix).to_dict()
return data
class NodeGroupTemplatesUtils(object):
def _create_take_action(self, client, app, parsed_args):
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.create(**template).to_dict()
else:
if (not parsed_args.name or not parsed_args.plugin or
not parsed_args.plugin_version or not parsed_args.flavor or
not parsed_args.processes):
raise exceptions.CommandError(
'At least --name, --plugin, --plugin-version, --processes,'
' --flavor arguments should be specified or json template '
'should be provided with --json argument')
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
compute_client = app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
data = create_node_group_templates(client, app, parsed_args,
flavor_id, configs, shares)
return data
def _list_take_action(self, client, app, parsed_args):
search_opts = {}
if parsed_args.plugin:
search_opts['plugin_name'] = parsed_args.plugin
if parsed_args.plugin_version:
search_opts['hadoop_version'] = parsed_args.plugin_version
data = client.node_group_templates.list(search_opts=search_opts)
if parsed_args.name:
data = get_by_name_substring(data, parsed_args.name)
if app.api_version['data_processing'] == '2':
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'plugin_version',
'node_processes', 'description')
column_headers = prepare_column_headers(columns)
else:
columns = ('name', 'id', 'plugin_name', 'plugin_version')
column_headers = prepare_column_headers(columns)
else:
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'hadoop_version',
'node_processes', 'description')
column_headers = prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
else:
columns = ('name', 'id', 'plugin_name', 'hadoop_version')
column_headers = prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
return (
column_headers,
(osc_utils.get_item_properties(
s,
columns,
formatters={
'node_processes': osc_utils.format_list
}
) for s in data)
)
def _update_take_action(self, client, app, parsed_args):
ngt_id = get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.update(
ngt_id, **template).to_dict()
else:
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
flavor_id = None
if parsed_args.flavor:
compute_client = self.app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
update_dict = create_dict_from_kwargs(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.use_auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.is_proxy_gateway,
volume_local_to_instance=parsed_args.volume_locality,
use_autoconfig=parsed_args.use_autoconfig,
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix
)
if app.api_version['data_processing'] == '2':
if 'hadoop_version' in update_dict:
update_dict.pop('hadoop_version')
update_dict['plugin_version'] = parsed_args.plugin_version
if parsed_args.boot_from_volume is not None:
update_dict['boot_from_volume'] = (
parsed_args.boot_from_volume)
if parsed_args.boot_volume_type is not None:
update_dict['boot_volume_type'] = (
parsed_args.boot_volume_type)
if parsed_args.boot_volume_availability_zone is not None:
update_dict['boot_volume_availability_zone'] = (
parsed_args.boot_volume_availability_zone)
if parsed_args.boot_volume_local_to_instance is not None:
update_dict['boot_volume_local_to_instance'] = (
parsed_args.boot_volume_local_to_instance)
data = client.node_group_templates.update(
ngt_id, **update_dict).to_dict()
return data
def _import_take_action(self, client, parsed_args):
if (not parsed_args.image_id or
not parsed_args.flavor_id):
raise exceptions.CommandError(
'At least --image_id and --flavor_id should be specified')
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
template['node_group_template']['floating_ip_pool'] = (
parsed_args.floating_ip_pool)
template['node_group_template']['image_id'] = (
parsed_args.image_id)
template['node_group_template']['flavor_id'] = (
parsed_args.flavor_id)
template['node_group_template']['security_groups'] = (
parsed_args.security_groups)
if parsed_args.name:
template['node_group_template']['name'] = parsed_args.name
data = client.node_group_templates.create(
**template['node_group_template']).to_dict()
return data
def _export_take_action(self, client, parsed_args):
ngt_id = get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
response = client.node_group_templates.export(ngt_id)
result = json.dumps(response._info, indent=4)+"\n"
if parsed_args.file:
with open(parsed_args.file, "w+") as file:
file.write(result)
else:
sys.stdout.write(result)
|
|
from PythonQt import QtCore, QtGui, QtUiTools
import director.applogic as app
import math
import numpy as np
from director.timercallback import TimerCallback
from director.simpletimer import SimpleTimer
from director.debugVis import DebugData
from director import planplayback
import director.visualization as vis
import director.vtkAll as vtk
import scipy.interpolate
def addWidgetsToDict(widgets, d):
for widget in widgets:
if widget.objectName:
d[str(widget.objectName)] = widget
addWidgetsToDict(widget.children(), d)
class WidgetDict(object):
def __init__(self, widgets):
addWidgetsToDict(widgets, self.__dict__)
def clearLayout(w):
children = w.findChildren(QtGui.QWidget)
for child in children:
child.delete()
class PlaybackPanel(object):
def __init__(self, planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner):
self.planPlayback = planPlayback
self.playbackRobotModel = playbackRobotModel
self.playbackJointController = playbackJointController
self.robotStateModel = robotStateModel
self.robotStateJointController = robotStateJointController
self.manipPlanner = manipPlanner
manipPlanner.connectPlanCommitted(self.onPlanCommitted)
manipPlanner.connectUseSupports(self.updateButtonColor)
self.autoPlay = True
self.animateOnExecute = False
#self.useOperationColors()
self.useDevelopmentColors()
self.planFramesObj = None
self.plan = None
self.poseInterpolator = None
self.startTime = 0.0
self.endTime = 1.0
self.animationTimer = TimerCallback()
self.animationTimer.targetFps = 60
self.animationTimer.callback = self.updateAnimation
self.animationClock = SimpleTimer()
loader = QtUiTools.QUiLoader()
uifile = QtCore.QFile(':/ui/ddPlaybackPanel.ui')
assert uifile.open(uifile.ReadOnly)
self.widget = loader.load(uifile)
uifile.close()
self.ui = WidgetDict(self.widget.children())
self.ui.viewModeCombo.connect('currentIndexChanged(const QString&)', self.viewModeChanged)
self.ui.playbackSpeedCombo.connect('currentIndexChanged(const QString&)', self.playbackSpeedChanged)
self.ui.interpolationCombo.connect('currentIndexChanged(const QString&)', self.interpolationChanged)
self.ui.samplesSpinBox.connect('valueChanged(int)', self.numberOfSamplesChanged)
self.ui.playbackSlider.connect('valueChanged(int)', self.playbackSliderValueChanged)
self.ui.animateButton.connect('clicked()', self.animateClicked)
self.ui.hideButton.connect('clicked()', self.hideClicked)
self.ui.executeButton.connect('clicked()', self.executeClicked)
self.ui.executeButton.setShortcut(QtGui.QKeySequence('Ctrl+Return'))
self.ui.stopButton.connect('clicked()', self.stopClicked)
self.ui.executeButton.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.ui.executeButton.connect('customContextMenuRequested(const QPoint&)', self.showExecuteContextMenu)
self.setPlan(None)
self.hideClicked()
def useDevelopmentColors(self):
self.robotStateModelDisplayAlpha = 0.1
self.playbackRobotModelUseTextures = False
self.playbackRobotModelDisplayAlpha = 1
def useOperationColors(self):
self.robotStateModelDisplayAlpha = 1
self.playbackRobotModelUseTextures = False
self.playbackRobotModelDisplayAlpha = 0.5
def showExecuteContextMenu(self, clickPosition):
globalPos = self.ui.executeButton.mapToGlobal(clickPosition)
menu = QtGui.QMenu()
menu.addAction('Visualization Only')
if not self.isPlanFeasible():
menu.addSeparator()
if self.isPlanAPlanWithSupports():
menu.addAction('Execute infeasible plan with supports')
else:
menu.addAction('Execute infeasible plan')
elif self.isPlanAPlanWithSupports():
menu.addSeparator()
menu.addAction('Execute plan with supports')
selectedAction = menu.exec_(globalPos)
if not selectedAction:
return
if selectedAction.text == 'Visualization Only':
self.executePlan(visOnly=True)
elif selectedAction.text == 'Execute infeasible plan':
self.executePlan(overrideInfeasibleCheck=True)
elif selectedAction.text == 'Execute plan with supports':
self.executePlan(overrideSupportsCheck=True)
elif selectedAction.text == 'Execute infeasible plan with supports':
self.executePlan(overrideInfeasibleCheck=True, overrideSupportsCheck=True)
def getViewMode(self):
return str(self.ui.viewModeCombo.currentText)
def setViewMode(self, mode):
'''
Set the mode of the view widget. input arg: 'continous', 'frames', 'hidden'
e.g. can hide all plan playback with 'hidden'
'''
self.ui.viewModeCombo.setCurrentIndex(self.ui.viewModeCombo.findText(mode))
def getPlaybackSpeed(self):
s = str(self.ui.playbackSpeedCombo.currentText).replace('x', '')
if '/' in s:
n, d = s.split('/')
return float(n)/float(d)
return float(s)
def getInterpolationMethod(self):
return str(self.ui.interpolationCombo.currentText)
def getNumberOfSamples(self):
return self.ui.samplesSpinBox.value
def viewModeChanged(self):
viewMode = self.getViewMode()
if viewMode == 'continuous':
playbackVisible = True
samplesVisible = False
interpolationVisible = True
elif viewMode == 'frames':
playbackVisible = False
samplesVisible = True
interpolationVisible = True
elif viewMode == 'hidden':
playbackVisible = False
samplesVisible = False
interpolationVisible = False
else:
raise Exception('Unexpected view mode')
self.ui.samplesLabel.setVisible(samplesVisible)
self.ui.samplesSpinBox.setVisible(samplesVisible)
self.ui.interpolationLabel.setVisible(interpolationVisible)
self.ui.interpolationCombo.setVisible(interpolationVisible)
self.ui.playbackSpeedLabel.setVisible(playbackVisible)
self.ui.playbackSpeedCombo.setVisible(playbackVisible)
self.ui.playbackSlider.setEnabled(playbackVisible)
self.ui.animateButton.setVisible(playbackVisible)
self.ui.timeLabel.setVisible(playbackVisible)
self.hidePlan()
if self.plan:
if viewMode == 'continuous' and self.autoPlay:
self.startAnimation()
elif viewMode == 'frames':
self.updatePlanFrames()
def playbackSpeedChanged(self):
self.planPlayback.playbackSpeed = self.getPlaybackSpeed()
def getPlaybackTime(self):
sliderValue = self.ui.playbackSlider.value
return (sliderValue / 1000.0) * self.endTime
def updateTimeLabel(self):
playbackTime = self.getPlaybackTime()
self.ui.timeLabel.text = 'Time: %.2f s' % playbackTime
def playbackSliderValueChanged(self):
self.updateTimeLabel()
self.showPoseAtTime(self.getPlaybackTime())
def interpolationChanged(self):
methods = {'linear' : 'slinear',
'cubic spline' : 'cubic',
'pchip' : 'pchip' }
self.planPlayback.interpolationMethod = methods[self.getInterpolationMethod()]
self.poseInterpolator = self.planPlayback.getPoseInterpolatorFromPlan(self.plan)
self.updatePlanFrames()
def numberOfSamplesChanged(self):
self.updatePlanFrames()
def animateClicked(self):
self.startAnimation()
def hideClicked(self):
if self.ui.hideButton.text == 'hide':
self.ui.playbackFrame.setEnabled(False)
self.hidePlan()
self.ui.hideButton.text = 'show'
self.ui.executeButton.setEnabled(False)
if not self.plan:
self.ui.hideButton.setEnabled(False)
else:
self.ui.playbackFrame.setEnabled(True)
self.ui.hideButton.text = 'hide'
self.ui.hideButton.setEnabled(True)
self.ui.executeButton.setEnabled(True)
self.viewModeChanged()
self.updateButtonColor()
def executeClicked(self):
self.executePlan()
def executePlan(self, visOnly=False, overrideInfeasibleCheck=False, overrideSupportsCheck=False):
if visOnly:
_, poses = self.planPlayback.getPlanPoses(self.plan)
self.onPlanCommitted(self.plan)
self.robotStateJointController.setPose('EST_ROBOT_STATE', poses[-1])
else:
if (self.isPlanFeasible() or overrideInfeasibleCheck) and (not self.isPlanAPlanWithSupports() or overrideSupportsCheck):
self.manipPlanner.commitManipPlan(self.plan)
def onPlanCommitted(self, plan):
if self.animateOnExecute:
self.startAnimation()
self.playbackRobotModel.setProperty('Visible', True)
self.playbackRobotModel.setProperty('Alpha', 0.1)
self.robotStateModel.setProperty('Visible', True)
self.robotStateModel.setProperty('Alpha', 1.0)
else:
self.setPlan(None)
self.hideClicked()
def stopClicked(self):
self.stopAnimation()
self.manipPlanner.sendPlanPause()
def isPlanFeasible(self):
plan = planplayback.asRobotPlan(self.plan)
return plan is not None and (max(plan.plan_info) < 10 and min(plan.plan_info) >= 0)
def getPlanInfo(self, plan):
plan = planplayback.asRobotPlan(self.plan)
return max(plan.plan_info)
def isPlanAPlanWithSupports(self):
return hasattr(self.plan, 'support_sequence') or self.manipPlanner.publishPlansWithSupports
def updatePlanFrames(self):
if self.getViewMode() != 'frames':
return
numberOfSamples = self.getNumberOfSamples()
meshes = self.planPlayback.getPlanPoseMeshes(self.plan, self.playbackJointController, self.playbackRobotModel, numberOfSamples)
d = DebugData()
startColor = [0.8, 0.8, 0.8]
endColor = [85/255.0, 255/255.0, 255/255.0]
colorFunc = scipy.interpolate.interp1d([0, numberOfSamples-1], [startColor, endColor], axis=0, kind='slinear')
for i, mesh in reversed(list(enumerate(meshes))):
d.addPolyData(mesh, color=colorFunc(i))
pd = d.getPolyData()
clean = vtk.vtkCleanPolyData()
clean.SetInput(pd)
clean.Update()
pd = clean.GetOutput()
self.planFramesObj = vis.updatePolyData(d.getPolyData(), 'robot plan', alpha=1.0, visible=False, colorByName='RGB255', parent='planning')
self.showPlanFrames()
def showPlanFrames(self):
self.planFramesObj.setProperty('Visible', True)
self.robotStateModel.setProperty('Visible', False)
self.playbackRobotModel.setProperty('Visible', False)
def startAnimation(self):
self.showPlaybackModel()
self.stopAnimation()
self.ui.playbackSlider.value = 0
self.animationClock.reset()
self.animationTimer.start()
self.updateAnimation()
def stopAnimation(self):
self.animationTimer.stop()
def showPlaybackModel(self):
self.robotStateModel.setProperty('Visible', True)
self.playbackRobotModel.setProperty('Visible', True)
self.playbackRobotModel.setProperty('Color Mode', 1 if self.playbackRobotModelUseTextures else 0)
self.robotStateModel.setProperty('Alpha', self.robotStateModelDisplayAlpha)
self.playbackRobotModel.setProperty('Alpha', self.playbackRobotModelDisplayAlpha)
if self.planFramesObj:
self.planFramesObj.setProperty('Visible', False)
def hidePlan(self):
self.stopAnimation()
self.ui.playbackSlider.value = 0
wasShowing = self.playbackRobotModel.getProperty('Visible') or (self.planFramesObj and self.planFramesObj.getProperty('Visible'))
if self.planFramesObj:
self.planFramesObj.setProperty('Visible', False)
if self.playbackRobotModel:
self.playbackRobotModel.setProperty('Visible', False)
if wasShowing:
self.robotStateModel.setProperty('Visible', True)
self.robotStateModel.setProperty('Alpha', 1.0)
def showPoseAtTime(self, time):
pose = self.poseInterpolator(time)
self.playbackJointController.setPose('plan_playback', pose)
def updateAnimation(self):
tNow = self.animationClock.elapsed() * self.planPlayback.playbackSpeed
if tNow > self.endTime:
tNow = self.endTime
sliderValue = int(1000.0 * tNow / self.endTime)
self.ui.playbackSlider.blockSignals(True)
self.ui.playbackSlider.value = sliderValue
self.ui.playbackSlider.blockSignals(False)
self.updateTimeLabel()
self.showPoseAtTime(tNow)
return tNow < self.endTime
def updateButtonColor(self):
if self.ui.executeButton.enabled and self.plan and not self.isPlanFeasible():
styleSheet = 'background-color:red'
elif self.ui.executeButton.enabled and self.plan and self.isPlanAPlanWithSupports():
styleSheet = 'background-color:orange'
else:
styleSheet = ''
self.ui.executeButton.setStyleSheet(styleSheet)
def setPlan(self, plan):
self.ui.playbackSlider.value = 0
self.ui.timeLabel.text = 'Time: 0.00 s'
self.ui.planNameLabel.text = ''
self.plan = plan
self.endTime = 1.0
self.updateButtonColor()
if not self.plan:
return
planText = 'Plan: %d. %.2f seconds' % (plan.utime, self.planPlayback.getPlanElapsedTime(plan))
self.ui.planNameLabel.text = planText
self.startTime = 0.0
self.endTime = self.planPlayback.getPlanElapsedTime(plan)
self.interpolationChanged()
info = self.getPlanInfo(plan)
app.displaySnoptInfo(info)
if self.ui.hideButton.text == 'show':
self.hideClicked()
else:
self.viewModeChanged()
self.updateButtonColor()
if self.autoPlay and self.widget.parent() is not None:
self.widget.parent().show()
def addPanelToMainWindow(playbackPanel):
global panel
global dock
panel = playbackPanel
dock = app.addWidgetToDock(panel.widget, dockArea=QtCore.Qt.BottomDockWidgetArea)
dock.hide()
def init(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner):
panel = PlaybackPanel(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner)
addPanelToMainWindow(panel)
return panel
|
|
import ipuz
import ipuz_Helper
import json
import sys
unlock_state="disabled"
notes_state="disabled"
is_puz_rebus=False
Encoding_2 = "ISO-8859-1"
class File():
title=None
author=None
cpyrt=None
notes=None
width=0
height=0
solnblock=[]
cellblock=[]
acc=0
dwn=0
across=[]
down=[]
loc=""
# is_multi is set to 1 in order to input rebus entries for a cell; it can be turned off only after 'enter' key is pressed
is_multi=0
multi=[]
across=[]
down=[]
cellblock=[]
solnblock=[]
row_cellno=[]
col_cellno=[]
cellno=[]
pencil=[]
valid=[]
gext=[]
time=0
time_state=0
ifil = input('Enter a file name along with path: ')
ofile_txt=ifil
data_file = open(ifil,'r')
data = data_file.read()
data_file.close()
# puzzle description read from the ipuz file is stored in the 'puzzle' instance
try:
puzzle = ipuz.read(data)
except ipuz.IPUZException:
print("Sorry, File corrupted")
sys.exit(0)
if 'block' in puzzle:
block=puzzle['block']
else:
block="#"
if 'empty' in puzzle:
empty=puzzle['empty']
try:
empty=int(empty)
except ValueError:
pass
else:
empty=0
if 'title' in puzzle:
title=puzzle['title']
else:
title='title'
if 'author' in puzzle:
author=puzzle['author']
else:
author='author'
if 'copyright' in puzzle:
cpyrt=puzzle['copyright']
else:
cpyrt='copyright'
if 'notes' in puzzle:
notes=puzzle['notes']
notes_state="normal"
else:
notes=''
if 'Across' in puzzle['clues'] and 'Down' in puzzle['clues']:
for i in range(0,len(puzzle['clues']['Across'])):
l=puzzle['clues']['Across'][i]
across.append([])
if isinstance(l,dict):
across[i].append(l['number'])
across[i].append(l['clue'])
else:
across[i].append(l[0])
across[i].append(l[1])
acc=len(across)
for i in range(0,len(puzzle['clues']['Down'])):
l=puzzle['clues']['Down'][i]
down.append([])
if isinstance(l,dict):
down[i].append(l['number'])
down[i].append(l['clue'])
else:
down[i].append(l[0])
down[i].append(l[1])
dwn=len(down)
if isinstance(puzzle['dimensions']['height'],str):
height=int(puzzle['dimensions']['height'])
else:
height=puzzle['dimensions']['height']
if isinstance(puzzle['dimensions']['width'],str):
width=int(puzzle['dimensions']['width'])
else:
width=puzzle['dimensions']['width']
for i in range(0,height):
# current state of the grid
cellblock.append([])
# stores the position of cell numbers for cells in the grid
cellno.append([])
# stores all the pencil entries in the grid
pencil.append([])
# stores the valid/invalid state of each entry in the grid
valid.append([])
# if available, stores the solution for puzzle; else all cell entries are assigned the character 'A'
solnblock.append([])
# stores details of circled, previously incorrect, incorrect or revealed entries present in the grid
gext.append([])
for j in range(0,width):
pencil[i].append(0)
valid[i].append(0)
gext[i].append(0)
if isinstance(puzzle['puzzle'][i][j],dict):
cellblock[i].append(puzzle['puzzle'][i][j]['cell'])
else:
cellblock[i].append(puzzle['puzzle'][i][j])
if cellblock[i][j]!=block and cellblock[i][j]!=empty and cellblock[i][j]!="null":
row_cellno.append(i)
col_cellno.append(j)
cellno[i].append(cellblock[i][j])
else:
cellno[i].append(0)
if cellblock[i][j]==block or cellblock[i][j]=="null" or cellblock[i][j]==None:
cellblock[i][j]="."
solnblock[i].append(".")
else:
# if an unshaded cell is encountered and any entry is present in it, stores the corresponding entry in the cell
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
cellblock[i][j]=puzzle['saved'][i][j]['value']
else:
cellblock[i][j]=puzzle['saved'][i][j]
if cellblock[i][j]==empty:
cellblock[i][j]="-"
else:
cellblock[i][j]=cellblock[i][j].upper()
else:
cellblock[i][j]="-"
# if an unshaded cell is encountered, stores the solution for the corresponding cell
if 'solution' in puzzle:
check_reveal_state="normal"
if isinstance(puzzle['solution'][i][j],dict):
solnblock[i].append(puzzle['solution'][i][j]['value'].upper())
else:
solnblock[i].append(puzzle['solution'][i][j].upper())
else:
check_reveal_state="disabled"
solnblock[i].append("A")
for i in range(0,height):
for j in range(0,width):
if(cellblock[i][j] in 'abcdefghijklmnopqrstuvwxyz'):
pencil[i][j]=1
cellblock[i][j]=cellblock[i][j].upper()
# calc_across and calc_down are for calculating current state of the across and down clues respectively
def calc_across(ch=1):
for i in range(0,acc):
temp=across[i][0]
c_row=row_cellno[temp-1]
c_col=col_cellno[temp-1]
curstr=""
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
if(ch==0):
across[i].append(len(curstr))
across[i].append(curstr)
else:
across[i][3]=curstr
def calc_down(ch=1):
for i in range(0,dwn):
temp=down[i][0]
c_row=row_cellno[temp-1]
c_col=col_cellno[temp-1]
curstr=""
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
if(ch==0):
down[i].append(len(curstr))
down[i].append(curstr)
else:
down[i][3]=curstr
# Notifies user if entire grid is filled with correct entries
def is_sol_complete():
for i in range(0,height):
for j in range(0,width):
if(cellblock[i][j]=="-"):
return
if(cellblock[i][j]!="." and cellblock[i][j]!=":" and valid[i][j]!=3):
if((is_puz_rebus==True) and (str(i)+","+str(j) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(i)+","+str(j))
temp_text=rebus_content[rebus_index]
else:
temp_text=solnblock[i][j]
if(cellblock[i][j]!=temp_text):
return
print("Congratulations, You have successfully completed the puzzle")
# displays clue and asks user to enter a solution for the corresponding clue
def disp_clue(clue):
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter word : ')
for char in getstr:
if(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
if(char not in "," and valid[c_row][c_col]!=3 ):
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=char.upper()
else:
cellblock[c_row][c_col]="-"
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_col=c_col+1
if(c_row==height or c_col==width):
break
else:
break
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
across[num][3]=curstr
calc_down()
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter word : ')
for char in getstr:
if(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
if(char not in "," and valid[c_row][c_col]!=3 ):
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=char.upper()
else:
cellblock[c_row][c_col]="-"
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_row=c_row+1
if(c_row==height or c_col==width):
break
else:
break
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
down[num][3]=curstr
calc_across()
return
print("Sorry wrong format")
# function for rebus entry at a particular location in a word
def disp_rebus_clue(clue):
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter the location where rebus has to be placed (for eg. in the word ABCDE, press 1 to place rebus at position A) : ')
loc=int(getstr)
if (loc>across[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_col=c_col+(loc-1)
if (valid[c_row][c_col]==3):
print("Sorry the cellblock at this location has already been revealed")
return
getstr=input('Enter the rebus word : ')
text=""
for char in getstr:
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
text=text+char.upper()
if(text==""):
text="-"
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=text
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
curstr=""
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
across[num][3]=curstr
calc_down()
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter the location where rebus has to be placed (for eg. in the word ABCDE, press 1 to place rebus at position A) : ')
loc=int(getstr)
if (loc>down[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_row=c_row+(loc-1)
if (valid[c_row][c_col]==3):
print("Sorry the cellblock at this location has already been revealed")
return
getstr=input('Enter the rebus word : ')
text=""
for char in getstr:
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
text=text+char.upper()
if(text==""):
text="-"
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=text
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
curstr=""
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
down[num][3]=curstr
calc_across()
return
print("Sorry wrong format")
# view all across and down clues along with their current state
def view_acc():
for i in range(0,acc):
temp=str(across[i][0])+". "+across[i][1]+" ("+str(across[i][2])+") : "+across[i][3]
print(temp)
def view_dwn():
for i in range(0,dwn):
temp=str(down[i][0])+". "+down[i][1]+" ("+str(down[i][2])+") : "+down[i][3]
print(temp)
# clears all the entries in the cells
def clear_cells():
for i in range(0,height):
for j in range(0,width):
valid[i][j]=0
pencil[i][j]=0
if cellblock[i][j]!="." and cellblock[i][j]!=":":
cellblock[i][j]="-"
j=j+1
i=i+1
calc_across()
calc_down()
# view current state of the puzzle
def view_cur():
temp=""
for i in range(0,height):
temp=""
for j in range(0,width):
temp=temp+" "+cellblock[i][j]
j=j+1
print(temp)
i=i+1
# checks the letter in the given row and column of grid with the corresponding letter in the solution
def check(c_row,c_col):
global valid
valid_count=True
if(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":" and valid[c_row][c_col]!=3):
if((is_puz_rebus==True) and (str(c_row)+","+str(c_col) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(c_row)+","+str(c_col))
temp_text=rebus_content[rebus_index]
else:
temp_text=solnblock[c_row][c_col]
if(cellblock[c_row][c_col]==temp_text or cellblock[c_row][c_col]=="-"):
valid_count=True
else:
valid_count=False
valid[c_row][c_col]=2
return valid_count
# checks the validity of a single letter in a word for a given clue
def check_one():
clue= input('Enter clue number (for e.g "1 across"): ')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter the location which has to be checked in the word (for eg. in the word ABCDE, press 1 to check the letter in position A) : ')
loc=int(getstr)
if (loc>across[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_col=c_col+(loc-1)
v=check(c_row,c_col)
if (v==True):
print("The letter is correct")
else:
print("Sorry, the letter seems to be incorrect")
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter the location which has to be checked in the word (for eg. in the word ABCDE, press 1 to check the letter in position A) : ')
loc=int(getstr)
if (loc>down[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_row=c_row+(loc-1)
v=check(c_row,c_col)
if (v==True):
print("The letter is correct")
else:
print("Sorry, the letter seems to be incorrect")
return
print("Sorry wrong format")
# checks the validity of a word for a given clue
def check_word():
ck_val=True
ad=0
clue = input('Enter clue number (for e.g "1 across"): ')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
ad=1
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
ad=2
if (ad==0):
print("Sorry wrong format!")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
text=""
while(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
val=check(c_row,c_col)
if (val==True):
if (cellblock[c_row][c_col]=="-"):
text=text+" - "
else:
text=text+" "+cellblock[c_row][c_col]+","+"Correct "
else:
text=text+" "+cellblock[c_row][c_col]+","+"Wrong "
ck_val=ck_val and val
if(ad==1):
c_col=c_col+1
else:
c_row=c_row+1
if (c_row == height or c_col==width):
break
if(ck_val==True):
print("No incorrect letters found!")
else:
print("Sorry there are some incorrect letters in the word")
print(text)
return
# checks the validity of the entire grid
def check_all():
ck_val=True
text=""
for i in range(0,height):
for j in range(0,width):
val=check(i,j)
if (val==True):
if (cellblock[i][j]=="-" or (cellblock[i][j]=="." or cellblock[i][j]==":" )):
text=text+" "+cellblock[i][j]+" "
else:
text=text+" "+cellblock[i][j]+","+"Correct "
else:
text=text+" "+cellblock[i][j]+","+"Wrong "
ck_val=ck_val and val
j=j+1
text=text+"\n"
i=i+1
if(ck_val==True):
print("No incorrect letters found!")
else:
print("Sorry there are some incorrect entries in the grid")
print(text)
return
# reveals the solution for the given row and column of grid
def reveal(i,j):
global valid
correct_entry=False
if((is_puz_rebus==True) and (str(i)+","+str(j) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(i)+","+str(j))
correct_entry=(rebus_content[rebus_index]==cellblock[i][j])
else:
correct_entry=(solnblock[i][j]==cellblock[i][j])
if(not(correct_entry)):
if solnblock[i][j]!="." and solnblock[i][j]!=":":
pencil[i][j]=0
valid[i][j]=3
if((is_puz_rebus==True) and (str(i)+","+str(j) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(i)+","+str(j))
cellblock[i][j]=rebus_content[rebus_index]
else:
cellblock[i][j]=solnblock[i][j]
# reveals a single letter in a word for a given clue
def reveal_one():
clue = input('Enter clue number (for e.g "1 across"): ')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter the location which has to be revealed in the word (for eg. in the word ABCDE, press 1 to reveal the letter in position A) : ')
loc=int(getstr)
if (loc>across[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_col=c_col+(loc-1)
reveal(c_row,c_col)
print("The letter at the given location is : "+cellblock[c_row][c_col])
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
across[num][3]=curstr
calc_down()
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter the location which has to be checked in the word (for eg. in the word ABCDE, press 1 to reveal the letter in position A): ')
loc=int(getstr)
if (loc>down[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_row=c_row+(loc-1)
reveal(c_row,c_col)
print("The letter at the given location is : "+cellblock[c_row][c_col])
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
down[num][3]=curstr
calc_across()
return
print("Sorry wrong format")
# reveals the word for a given clue
def reveal_word():
ck_val=True
ad=0
clue = input('Enter clue number (for e.g "1 across"):')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
ad=1
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
ad=2
if (ad==0):
print("Sorry wrong format!")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
text=""
while(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
reveal(c_row,c_col)
text=text+cellblock[c_row][c_col]
if(ad==1):
c_col=c_col+1
else:
c_row=c_row+1
if (c_row == height or c_col==width):
break
if(ad==1):
print("The word for the clue '"+across[num][1]+"' is : "+text)
else:
print("The word for the clue '"+down[num][1]+"' is : "+text)
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
if(ad==1):
across[num][3]=text
calc_down()
else:
down[num][3]=text
calc_across()
return
# reveals the complete solution
def reveal_sol():
text=""
for i in range(0,height):
for j in range(0,width):
reveal(i,j)
text=text+" "+cellblock[i][j]
j=j+1
text=text+"\n"
i=i+1
print("Solution Grid : ")
print(text)
calc_across()
calc_down()
return
# in locked puzzles, this function checks the validity of the key entered by the user.
def check_key(key):
global check_reveal_state,unlock_state,soln_state,checksum_sol
ab=unscramble_solution(soln.decode(Encoding_2), width, height, int(key))
temp=""
c=0
for j in range(0,width):
c=j
for i in range(0,height):
if(ab[c]!=":" and ab[c]!="."):
temp=temp+ab[c]
c=c+width
data=temp.encode(Encoding_2)
cksum=0
for c in data:
if (cksum & 0x0001):
cksum = ((cksum >> 1) | 0x8000)
else:
cksum = (cksum >> 1)
cksum = (cksum + c) & 0xffff
if (cksum==checksum_sol[0]):
print("The solution for the puzzle has been unlocked")
check_reveal_state="normal"
unlock_state="disabled"
soln_state[0]=0
checksum_sol[0]=0
temp=0
for i in range(0,height):
for j in range(0,width):
solnblock[i][j]=ab[temp]
temp=temp+1
else:
print("Sorry, Wrong key!")
# in locked puzzles, this function gets the key from the user, to unlock the solution.
def unlock_soln():
global key
key = input("Enter the 4 digit key : ")
check_key(key)
# overrides the IPUZ file with the current state of the puzzle
def save_sol():
temp_l=[]
for i in range(0,height):
if 'saved' not in puzzle:
temp_l.append([])
for j in range(0,width):
if cellblock[i][j]==".":
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
puzzle['saved'][i][j]['value']=block
else:
puzzle['saved'][i][j]=block
else:
temp_l[i].append(block)
elif cellblock[i][j]=="-":
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
puzzle['saved'][i][j]['value']=empty
else:
puzzle['saved'][i][j]=empty
else:
temp_l[i].append(empty)
else:
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
puzzle['saved'][i][j]['value']=cellblock[i][j]
else:
puzzle['saved'][i][j]=cellblock[i][j]
else:
temp_l[i].append(cellblock[i][j])
if 'saved' not in puzzle:
puzzle['saved']=temp_l
data = ipuz.write(puzzle, jsonp=True, callback_name="ipuz_function")
ofile=open(ifil,mode='w')
ofile.write(data)
ofile.close()
# saves the current state of the puzzle in binary format
def save_puz():
getloc=ofile_txt.split("/")
st=getloc[len(getloc)-1]
op=ofile_txt.replace(st,"")
split1=st.split(".")
newst=""
for i in range(0,(len(split1)-1)):
newst=newst+split1[i]
op=op+newst+".puz"
if 'title' in puzzle:
File.title=puzzle['title']
else:
File.title='title'
if 'author' in puzzle:
File.author=puzzle['author']
else:
File.author='author'
if 'copyright' in puzzle:
File.cpyrt=puzzle['copyright']
else:
File.cpyrt='copyright'
if 'notes' in puzzle:
File.notes=puzzle['notes']
else:
File.notes=''
File.width=width
File.height=height
File.solnblock=solnblock
File.cellblock=cellblock
File.acc=acc
File.dwn=dwn
File.across=across
File.down=down
File.loc=op
ipuz_Helper.filewrite(File)
# saves the current state of the puzzle as a text file
def save_txt():
getloc=ofile_txt.split("/")
st=getloc[len(getloc)-1]
op=ofile_txt.replace(st,"")
split1=st.split(".")
newst=""
for i in range(0,(len(split1)-1)):
newst=newst+split1[i]
op=op+newst+".txt"
col_space=[]
max_col=0
ofl=open(op,mode='wb')
ofl.write(("\n ").encode(Encoding_2))
ofl.write(title)
for j in range (0,width):
for i in range (0,height):
if (len(cellblock[i][j])>max_col):
max_col=len(cellblock[i][j])
col_space.append(max_col)
max_col=0
ofl.write(("\n\n\n Current State of the puzzle:\n\n ").encode(Encoding_2))
for i in range(0,height):
ofl.write(("\n ").encode(Encoding_2))
ad_space=0
for j in range(0,width):
if(cellblock[i][j]!=":"):
ofl.write(cellblock[i][j].encode(Encoding_2))
else:
ofl.write(".".encode(Encoding_2))
ad_space=col_space[j]-len(cellblock[i][j])
if ad_space>0:
for k in range(0,ad_space):
ofl.write((" ").encode(Encoding_2))
ofl.write((" ").encode(Encoding_2))
ofl.write(("\n\n CLUES\n").encode(Encoding_2))
ofl.write("\n Across : \n".encode(Encoding_2))
calc_across()
calc_down()
for i in range(0,acc):
ct=across[i][0]
r=row_cellno[ct-1]
c=col_cellno[ct-1]
temp=str(across[i][0])+". "+across[i][1]+" <"+across[i][3]+">"
ofl.write(("\n ").encode(Encoding_2))
ofl.write(temp.encode(Encoding_2))
ofl.write("\n\n Down :\n".encode(Encoding_2))
for i in range(0,dwn):
ct=down[i][0]
r=row_cellno[ct-1]
c=col_cellno[ct-1]
temp=str(down[i][0])+". "+down[i][1]+" <"+down[i][3]+">"
ofl.write(("\n ").encode(Encoding_2))
ofl.write(temp.encode(Encoding_2))
ofl.close()
time_state=1
ip=1
calc_across(0)
calc_down(0)
# performs actions corresponding to the option selected by the user
print('Enter 1 to Display the option menu anytime')
while(ip!=0):
ip = input('Enter your option: ')
if(ip=="1"):
if(unlock_state=="disabled"):
print(" 2 : Enter word for a clue (While entering letters for the word, press ',' key to repeat letters from the previous entry of the word eg. A,,DE)\n 3 : Enter rebus for a cell\n 4 : View all across clues\n 5 : View all down clues\n 6 : Clear cells\n 7 : Save\n 8 : View current state of the grid\n 9 : Check a letter, word or entire grid\n 10 : Reveal letter, word or entire solution grid")
else:
print(" 2 : Enter word for a clue\n 3 : Enter rebus for a cell\n 4 : View all across clues\n 5 : View all down clues\n 6 : Clear cells\n 7 : Save\n 8 : View current state of the grid\n 11 : Unlock solution")
if(notes_state=="normal"):
print(" 12 : Display notepad\n 0 : Exit")
else:
print(" 0 : Exit")
if(ip=="2"):
clue= input('Enter clue number (for e.g "1 across"): ')
disp_clue(clue)
is_sol_complete()
if(ip=="3"):
clue= input('Enter clue number (for e.g "1 across"): ')
disp_rebus_clue(clue)
is_sol_complete()
if(ip=="4"):
print('Across:')
view_acc()
if(ip=="5"):
print('Down:')
view_dwn()
if(ip=="6"):
clear_cells()
print('Cells Cleared!!')
if(ip=="7"):
choice=input(' 1 : Save work\n 2 : Save as .puz file\n 3 : Copy work to a text file\n')
if choice=="1":
save_sol()
print("Saved Work Succesfully!")
if choice=="2":
save_puz()
print("Saved Work Succesfully!")
if choice=="3":
save_txt()
print("Saved as text file succesfully!")
if(ip=="8"):
print('Current Block:')
view_cur()
if(ip=="9"):
if(unlock_state=="disabled"):
print('Enter your choice for checking blocks:')
choice=input(' 1 : Check letter\n 2 : Check word\n 3 : Check entire grid\n')
if choice=="1":
check_one()
if choice=="2":
check_word()
if choice=="3":
check_all()
else:
print("Sorry you must unlock the solution first to check or reveal the grid")
if(ip=="10"):
if(unlock_state=="disabled"):
print('Enter your choice for revealing blocks:')
choice=input(' 1 : Reveal letter\n 2 : Reveal word\n 3 : Reveal entire grid\n')
if choice=="1":
reveal_one()
is_sol_complete()
if choice=="2":
reveal_word()
is_sol_complete()
if choice=="3":
reveal_sol()
is_sol_complete()
else:
print("Sorry you must unlock the solution first to check or reveal the grid")
if(ip=="11"):
if(unlock_state=="normal"):
print('Unlock Solution:')
unlock_soln()
else:
print("The solution has already been unlocked!")
if(ip=="12"):
if(notes_state=="normal"):
print(notes.decode(Encoding_2))
else:
print("There are no notes available for this puzzle")
if(ip=="0"):
print("Thank you!!")
break
|
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import str
from past.utils import old_div
import os
import sys
from astropy.coordinates import SkyCoord
import numpy as np
from math import log10
from . import tableio
def compute_AEBV(filter='r_SDSS', sed='flat'):
import bpz_tools
"""
Return A(lambda)/E(B-V) for a given filter response and SED. Any
SED can be used or a flat SED which is basically no SED
convolved.
Returns A(lambda)/A(V) and A(lambda)/E(B-v)
"""
Vfilter = 'V_Bessell'
Bfilter = 'B_Bessell'
AV = 0.1
# Get A(filter)
if sed == 'flat':
(xl, yf) = bpz_tools.get_filter(filter)
ysed = xl * 0.0 + 1.0
else:
(xl, ysed, yf) = bpz_tools.get_sednfilter(sed, filter)
xl = np.asarray(xl)
yf = np.asarray(yf)
ysed = np.array(ysed)
Ax = AAV_ccm(xl)
#A_lambda = flux(xl, Ax, yf, units='f_l')
mx_0 = -2.5 * log10(flux(xl, ysed, yf)) # Normal
mx_1 = -2.5 * log10(flux(xl, ysed * 10**(-0.4 * Ax * AV), yf)) # Redder
A_AV = old_div((mx_1 - mx_0), AV)
# Get A(V)
if sed == 'flat':
(xl, yf) = bpz_tools.get_filter(Vfilter)
ysed = xl * 0.0 + 1.0
else:
(xl, ysed, yf) = bpz_tools.get_sednfilter(sed, Vfilter)
xl = np.asarray(xl)
yf = np.asarray(yf)
ysed = np.array(ysed) * 0.0 + 1.0
Ax = AAV_ccm(xl)
#A_lambda = flux(xl, Ax, yf, units='f_l')
mV_0 = -2.5 * log10(flux(xl, ysed, yf)) # Normal
mV_1 = -2.5 * log10(flux(xl, ysed * 10**(-0.4 * Ax * AV), yf)) # Redder
# Get A(B)
if sed == 'flat':
(xl, yf) = bpz_tools.get_filter(Bfilter)
ysed = xl * 0.0 + 1.0
else:
(xl, ysed, yf) = bpz_tools.get_sednfilter(sed, Bfilter)
xl = np.asarray(xl)
yf = np.asarray(yf)
ysed = np.array(ysed) * 0.0 + 1.0
Ax = AAV_ccm(xl)
#A_lambda = flux(xl, Ax, yf, units='f_l')
mB_0 = -2.5 * log10(flux(xl, ysed, yf)) # Normal
mB_1 = -2.5 * log10(flux(xl, ysed * 10**(-0.4 * Ax * AV), yf)) # Redder
# Compute A/E(B-V)
A_EBV = old_div((mx_1 - mx_0), ((mB_1 - mV_1) - (mB_0 - mV_0)))
return A_AV, A_EBV, old_div(A_EBV, A_AV)
def flux(xsr, ys, yr, ccd='yes', units='nu'):
from scipy.integrate import trapz
from math import sqrt
""" Flux of spectrum ys observed through response yr,
both defined on xsr
Both f_nu and f_lambda have to be defined over lambda
If units=nu, it gives f_nu as the output
"""
clight_AHz = 2.99792458e18
if ccd == 'yes': yr = yr * xsr
norm = trapz(yr, xsr)
f_l = old_div(trapz(ys * yr, xsr), norm)
if units == 'nu':
# Pivotal Wavelength
lp = sqrt(old_div(norm, trapz(yr / xsr / xsr, xsr)))
return f_l * lp**2 / clight_AHz
else:
return f_l
def getEBV_old(ra, dec):
""" function recieves a coordinate pair, RA and DEC from the
caller, converts to galactic coords and runs the dust_getval
code, installed in the path. Returns an extinction correction
in magnitudes and an error object (a list of strings) of
possible reported problems with the region of the sky.
"""
# convert ra and dec to l,b using astutil.
# ra should be in hours (freaking iraf).
# we emulate pipes to Stdin and Stdout so we don't write no stinking files
#raanddec = [str(ra/15) +" "+str(dec)+" 2000"]
raanddec = [str(ra) + " " + str(dec) + " 2000"]
conversion = astutil.galactic(Stdin=raanddec, print_c="no", Stdout=1)
# conversion is a list of strings, this has only one element:
# eg. [' 227.5430 46.1912']
# which is l and b
gall = conversion[0].split()[0]
galb = conversion[0].split()[1]
# ok, we have l and b. now onto the extinction stuff.
# build the dust_val command line
cmd = "dust_getval " + gall + " " + galb + " interp=y verbose=n"
output = _calc_ebv(cmd)
# output is a list of strings, only one element in this case. looks like
# [' 227.543 46.191 0.03452\n']
# dust_getval returns the original coords and
# the extinction correction in mags
# which is the last piece of that string
eBV = output[0].split()[2]
# next run dust_getval with the mask option to look for anomolies in the maps
cmd = "dust_getval " + gall + " " + galb + " map=mask verbose=n"
mask = _calc_ebv(cmd)
# quality is a string of data quality flags returned by dust_getval when
# map=mask.
# looks like
# ' 227.543 46.191 3hcons OK OK OK OK OK OK \n'
quality = mask[1]
# return this with the extinction.
return eBV, quality
def get_EBV(ra, dec):
""" function recieves a coordinate pair, RA and DEC from the
caller, converts to galactic coords and runs the dust_getval
code, installed in the path. Returns an extinction correction
in magnitudes and an error object (a list of strings) of
possible reported problems with the region of the sky.
"""
rand_ID = np.random.randint(1e5)
coords_eq = "/tmp/coords_radec_{}.dat".format(rand_ID)
coords_lb = "/tmp/coords_galac_{}.dat".format(rand_ID)
eBVdata = "/tmp/eBV_{}.dat".format(rand_ID)
# Check that they are arrays
if isinstance(ra, float) or isinstance(dec, float):
ra = np.asarray(ra)
dec = np.asarray(dec)
if len(ra) != len(dec):
print("ERROR: RA,DEC must have same dimensions")
return
# convert ra and dec to l,b using astutil. ra should be in hours
# Write out a coords.dat file
tableio.put_data(coords_eq, (ra, dec), format="%10.6f %10.6f 2000")
# convert the ra/dec to l/b galactic coordinates
c = SkyCoord(ra, dec, frame='icrs', unit='deg')
tableio.put_data(coords_lb, (c.galactic.l.value, c.galactic.b.value),
format="%10.6f %10.6f 2000")
#astutil.galactic(coords_eq, print_c="no", Stdout=coords_lb)
# ok, we have l and b. now onto the extinction stuff. build the dust_val
# command line
cmd = "dust_getval infile=%s outfile=%s verbose=n interp=y" % (coords_lb,
eBVdata)
#_calc_ebv(cmd)
os.system(cmd)
# dust_getval returns the original coords and the extinction correction in
# mags eg. ['227.543 46.191 0.03452\n']
# Get the array of eBV values for each coordinate position
eBV = tableio.get_data(eBVdata, cols=(2, ))
# Remove the temporary files
os.system("rm %s %s %s" % (coords_eq, coords_lb, eBVdata))
return eBV
def filterFactor(filter):
"""caller passes an ACS filter of the form "DET_FILTER" and function
returns the extinction correction factor, a float, for that filter. Now
this function defines a dictionary of extinction correction factors
directly, but this also exists as a file in $PIPELINE/maps. It is
anticipated that these values will not change often, if at all, hence, the
dictionary is defined here rather than created on the fly from the file,
but that could be changed if it is anticipated that these numbers might
change a lot.
eg.
>>>filterFactor("HRC_F555W")
'3.24695724147'
"""
ffactors = {
"g_MOSAICII": 3.88489537829,
"r_MOSAICII": 2.78438802442,
"i_MOSAICII": 2.06519949822,
"z_MOSAICII": 1.39714057191,
"g": 3.88489537829,
"r": 2.78438802442,
"i": 2.06519949822,
"z": 1.39714057191,
"HST_ACS_WFC_F435W": 4.11697363315, # Values taken from APSIS
"HST_ACS_WFC_F555W": 3.24230342654,
"HST_ACS_WFC_F606W": 2.928500386,
"HST_ACS_WFC_F814W": 1.84740717341,
"HST_ACS_WFC_F475W": 3.74714182372,
"HST_ACS_WFC_F625W": 2.67121669327,
"HST_ACS_WFC_F775W": 2.01774028108,
"HST_ACS_WFC_F850LP": 1.47335876958,
"HST_ACS_WFC_F502N": 3.52366637215,
"HST_ACS_WFC_F892N": 1.51713294198,
"HST_ACS_WFC_F658N": 2.52193964747,
"HST_ACS_WFC_F550M": 3.05672088958,
"HST_ACS_HRC_F435W": 4.11227228078,
"HST_ACS_HRC_F555W": 3.24695724147,
"HST_ACS_HRC_F606W": 2.94741773243,
"HST_ACS_HRC_F814W": 1.82249557542,
"HST_ACS_HRC_F475W": 3.724544959,
"HST_ACS_HRC_F625W": 2.67859346295,
"HST_ACS_HRC_F775W": 2.02818977096,
"HST_ACS_HRC_F850LP": 1.44407689634,
"HST_ACS_HRC_F344N": 5.10086305785,
"HST_ACS_HRC_F502N": 3.52410034736,
"HST_ACS_HRC_F892N": 1.5170227107,
"HST_ACS_HRC_F658N": 2.52245685895,
"HST_ACS_HRC_F220W": 8.81083859281,
"HST_ACS_HRC_F250W": 6.52297815722,
"HST_ACS_HRC_F330W": 5.17376227866,
"HST_ACS_HRC_F550M": 3.05823161415,
# Geneated runnning:
#deredden.compute_AEBV(filter='R_SPECIAL_FORS2',sed='flat')[1]
#deredden.compute_AEBV(filter='I_BESS_FORS2',sed='flat')[1]
#deredden.compute_AEBV(filter='z_GUNN_FORS2',sed='flat')[1]
"R_SPECIAL": 2.6355966034891507,
"I_BESS": 1.9785822369228967,
"z_GUNN": 1.2740325163787274,
# To handle the fact that there is no coverage for the IRAC CH1 and CH2
# band
"CH1": 0.0,
"CH2": 0.0,
}
try:
return ffactors[filter]
except KeyError:
return compute_AEBV(filter='K_KittPeak', sed='flat')[1]
##############################################################################
def _calc_ebv(cmd):
sproc = popen2.Popen3(cmd, 1)
output = sproc.fromchild.readlines()
errs = sproc.childerr.readlines()
return output
# Compute A(lambda)/A(V) using the prescription from Cardelli, Clayton
# & Mathis (1998) uptaed in the optical-NIR using O'Donnell (1994).
def AAV_ccm(wavelength, rv=3.1):
land = np.logical_and
# Convert to inverse microns
x = old_div(10000.0, wavelength)
a = x * 0.0
b = x * 0.0
# Compute a(x) and b(x) for all cases
# Case 1: x < 0.3
ix = np.where(x < 0.3)
Nsel = len(ix[0])
if Nsel > 0:
sys.exit("Wavelength out of range of extinction function")
# Case 2: Infrared 0.3< x<1.1
ix = np.where(land(x > 0.3, x <= 1.1))
Nsel = len(ix[0])
if Nsel > 0:
y = x[ix]
a[ix] = 0.574 * y**1.61
b[ix] = -0.527 * y**1.61
# Case 3; Optical/NIR 1.1 < x <= 3.3
ix = np.where(land(x > 1.1, x <= 3.3))
Nsel = len(ix[0])
if Nsel > 0:
y = x[ix] - 1.82
# Carelli fit
#a[ix] = 1.0 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 +
#0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7
#b[ix] = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 -
#0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7
# O'Donnell fit
a[ix] = (1.0 + 0.104 * y - 0.609 * y**2 + 0.701 * y**3 + 1.137 *
y**4 - 1.718 * y**5 - 0.827 * y**6 + 1.647 * y**7 - 0.505 *
y**8)
b[ix] = (1.952 * y + 2.908 * y**2 - 3.989 * y**3 - 7.985 * y**4 +
11.102 * y**5 + 5.491 * y**6 - 10.805 * y**7 + 3.347 * y**8)
# Case 4: Mid-UV 3.3 < x <= 5.9
ix = np.where(land(x > 3.3, x <= 5.9))
Nsel = len(ix[0])
if Nsel > 0:
y = (x[ix] - 4.67)**2
a[ix] = 1.752 - 0.316 * x[ix] - old_div(0.104, (y + 0.341))
b[ix] = -3.090 + 1.825 * x[ix] + old_div(1.206, (y + 0.263))
# Case 4: 5.9 < x < 8.0
ix = np.where(land(x > 5.9, x <= 8.0))
Nsel = len(ix[0])
if Nsel > 0:
y = (x[ix] - 4.67)**2
a[ix] = 1.752 - 0.316 * x[ix] - old_div(0.104, (y + 0.341))
b[ix] = -3.090 + 1.825 * x[ix] + old_div(1.206, (y + 0.263))
y = x[ix] - 5.9
a[ix] = a[ix] - 0.04473 * y**2 - 0.009779 * y**3
b[ix] = b[ix] + 0.21300 * y**2 + 0.120700 * y**3
# Case 5: 8 < x < 11 ; Far-UV
ix = np.where(land(x > 8.0, x <= 11.0))
Nsel = len(ix[0])
if Nsel > 0:
y = x[ix] - 8.0
a[ix] = -1.072 - 0.628 * y + 0.137 * y**2 - 0.070 * y**3
b[ix] = 13.670 + 4.257 * y - 0.420 * y**2 + 0.374 * y**3
# Compute A(lambda)/A(V)
AAV = a + old_div(b, rv)
return AAV
# Compute values for the MOSAICII filters
def MOSAICII():
filters = ('g_MOSAICII', 'r_MOSAICII', 'i_MOSAICII', 'z_MOSAICII')
for filter in filters:
print(filter, compute_AEBV(filter, 'flat')[1])
|
|
from __future__ import absolute_import
from __future__ import with_statement
import sys
import socket
from datetime import datetime, timedelta
from kombu import pidbox
from mock import Mock, patch
from celery import current_app
from celery.datastructures import AttributeDict
from celery.task import task
from celery.utils import uuid
from celery.utils.timer2 import Timer
from celery.worker import WorkController as _WC
from celery.worker import consumer
from celery.worker import control
from celery.worker import state
from celery.worker.buckets import FastQueue
from celery.worker.job import TaskRequest
from celery.worker.state import revoked
from celery.worker.control import Panel
from celery.tests.utils import Case
hostname = socket.gethostname()
@task(rate_limit=200) # for extra info in dump_tasks
def mytask():
pass
class WorkController(object):
autoscaler = None
class Consumer(consumer.Consumer):
def __init__(self):
self.ready_queue = FastQueue()
self.timer = Timer()
self.app = current_app
self.event_dispatcher = Mock()
self.controller = WorkController()
self.task_consumer = Mock()
from celery.concurrency.base import BasePool
self.pool = BasePool(10)
@property
def info(self):
return {'xyz': 'XYZ'}
class test_ControlPanel(Case):
def setUp(self):
self.app = current_app
self.panel = self.create_panel(consumer=Consumer())
def create_state(self, **kwargs):
kwargs.setdefault('app', self.app)
return AttributeDict(kwargs)
def create_panel(self, **kwargs):
return self.app.control.mailbox.Node(hostname=hostname,
state=self.create_state(**kwargs),
handlers=Panel.data)
def test_enable_events(self):
consumer = Consumer()
panel = self.create_panel(consumer=consumer)
consumer.event_dispatcher.enabled = False
panel.handle('enable_events')
self.assertTrue(consumer.event_dispatcher.enable.call_count)
self.assertIn(
('worker-online', ),
consumer.event_dispatcher.send.call_args,
)
consumer.event_dispatcher.enabled = True
self.assertIn('already enabled', panel.handle('enable_events')['ok'])
def test_disable_events(self):
consumer = Consumer()
panel = self.create_panel(consumer=consumer)
consumer.event_dispatcher.enabled = True
panel.handle('disable_events')
self.assertTrue(consumer.event_dispatcher.disable.call_count)
self.assertIn(('worker-offline', ),
consumer.event_dispatcher.send.call_args)
consumer.event_dispatcher.enabled = False
self.assertIn('already disabled', panel.handle('disable_events')['ok'])
def test_heartbeat(self):
consumer = Consumer()
panel = self.create_panel(consumer=consumer)
consumer.event_dispatcher.enabled = True
panel.handle('heartbeat')
self.assertIn(('worker-heartbeat', ),
consumer.event_dispatcher.send.call_args)
def test_time_limit(self):
panel = self.create_panel(consumer=Mock())
th, ts = mytask.time_limit, mytask.soft_time_limit
try:
r = panel.handle('time_limit', arguments=dict(
task_name=mytask.name, hard=30, soft=10))
self.assertEqual((mytask.time_limit, mytask.soft_time_limit),
(30, 10))
self.assertIn('ok', r)
r = panel.handle('time_limit', arguments=dict(
task_name=mytask.name, hard=None, soft=None))
self.assertEqual((mytask.time_limit, mytask.soft_time_limit),
(None, None))
self.assertIn('ok', r)
r = panel.handle('time_limit', arguments=dict(
task_name='248e8afya9s8dh921eh928', hard=30))
self.assertIn('error', r)
finally:
mytask.time_limit, mytask.soft_time_limit = th, ts
def test_active_queues(self):
import kombu
x = kombu.Consumer(current_app.connection(),
[kombu.Queue('foo', kombu.Exchange('foo'), 'foo'),
kombu.Queue('bar', kombu.Exchange('bar'), 'bar')],
auto_declare=False)
consumer = Mock()
consumer.task_consumer = x
panel = self.create_panel(consumer=consumer)
r = panel.handle('active_queues')
self.assertListEqual(list(sorted(q['name'] for q in r)),
['bar', 'foo'])
def test_dump_tasks(self):
info = '\n'.join(self.panel.handle('dump_tasks'))
self.assertIn('mytask', info)
self.assertIn('rate_limit=200', info)
def test_stats(self):
prev_count, state.total_count = state.total_count, 100
try:
self.assertDictContainsSubset({'total': 100,
'consumer': {'xyz': 'XYZ'}},
self.panel.handle('stats'))
self.panel.state.consumer = Mock()
self.panel.handle('stats')
self.assertTrue(
self.panel.state.consumer.controller.autoscaler.info.called)
finally:
state.total_count = prev_count
def test_report(self):
self.panel.handle('report')
def test_active(self):
r = TaskRequest(mytask.name, 'do re mi', (), {})
state.active_requests.add(r)
try:
self.assertTrue(self.panel.handle('dump_active'))
finally:
state.active_requests.discard(r)
def test_pool_grow(self):
class MockPool(object):
def __init__(self, size=1):
self.size = size
def grow(self, n=1):
self.size += n
def shrink(self, n=1):
self.size -= n
consumer = Consumer()
consumer.pool = MockPool()
panel = self.create_panel(consumer=consumer)
panel.handle('pool_grow')
self.assertEqual(consumer.pool.size, 2)
panel.handle('pool_shrink')
self.assertEqual(consumer.pool.size, 1)
panel.state.consumer = Mock()
panel.state.consumer.controller = Mock()
sc = panel.state.consumer.controller.autoscaler = Mock()
panel.handle('pool_grow')
self.assertTrue(sc.force_scale_up.called)
panel.handle('pool_shrink')
self.assertTrue(sc.force_scale_down.called)
def test_add__cancel_consumer(self):
class MockConsumer(object):
queues = []
cancelled = []
consuming = False
def add_queue(self, queue):
self.queues.append(queue.name)
def consume(self):
self.consuming = True
def cancel_by_queue(self, queue):
self.cancelled.append(queue)
def consuming_from(self, queue):
return queue in self.queues
consumer = Consumer()
consumer.task_consumer = MockConsumer()
panel = self.create_panel(consumer=consumer)
panel.handle('add_consumer', {'queue': 'MyQueue'})
self.assertIn('MyQueue', consumer.task_consumer.queues)
self.assertTrue(consumer.task_consumer.consuming)
panel.handle('add_consumer', {'queue': 'MyQueue'})
panel.handle('cancel_consumer', {'queue': 'MyQueue'})
self.assertIn('MyQueue', consumer.task_consumer.cancelled)
def test_revoked(self):
state.revoked.clear()
state.revoked.add('a1')
state.revoked.add('a2')
try:
self.assertEqual(sorted(self.panel.handle('dump_revoked')),
['a1', 'a2'])
finally:
state.revoked.clear()
def test_dump_schedule(self):
consumer = Consumer()
panel = self.create_panel(consumer=consumer)
self.assertFalse(panel.handle('dump_schedule'))
r = TaskRequest(mytask.name, 'CAFEBABE', (), {})
consumer.timer.schedule.enter(
consumer.timer.Entry(lambda x: x, (r, )),
datetime.now() + timedelta(seconds=10))
self.assertTrue(panel.handle('dump_schedule'))
def test_dump_reserved(self):
from celery.worker import state
consumer = Consumer()
state.reserved_requests.add(
TaskRequest(mytask.name, uuid(), args=(2, 2), kwargs={}),
)
try:
panel = self.create_panel(consumer=consumer)
response = panel.handle('dump_reserved', {'safe': True})
self.assertDictContainsSubset(
{'name': mytask.name,
'args': (2, 2),
'kwargs': {},
'hostname': socket.gethostname()},
response[0],
)
state.reserved_requests.clear()
self.assertFalse(panel.handle('dump_reserved'))
finally:
state.reserved_requests.clear()
def test_rate_limit_when_disabled(self):
app = current_app
app.conf.CELERY_DISABLE_RATE_LIMITS = True
try:
e = self.panel.handle(
'rate_limit',
arguments={'task_name': mytask.name,
'rate_limit': '100/m'})
self.assertIn('rate limits disabled', e.get('error'))
finally:
app.conf.CELERY_DISABLE_RATE_LIMITS = False
def test_rate_limit_invalid_rate_limit_string(self):
e = self.panel.handle('rate_limit', arguments=dict(
task_name='tasks.add', rate_limit='x1240301#%!'))
self.assertIn('Invalid rate limit string', e.get('error'))
def test_rate_limit(self):
class Consumer(object):
class ReadyQueue(object):
fresh = False
def refresh(self):
self.fresh = True
def __init__(self):
self.ready_queue = self.ReadyQueue()
consumer = Consumer()
panel = self.create_panel(app=current_app, consumer=consumer)
task = current_app.tasks[mytask.name]
old_rate_limit = task.rate_limit
try:
panel.handle('rate_limit', arguments=dict(task_name=task.name,
rate_limit='100/m'))
self.assertEqual(task.rate_limit, '100/m')
self.assertTrue(consumer.ready_queue.fresh)
consumer.ready_queue.fresh = False
panel.handle('rate_limit', arguments=dict(task_name=task.name,
rate_limit=0))
self.assertEqual(task.rate_limit, 0)
self.assertTrue(consumer.ready_queue.fresh)
finally:
task.rate_limit = old_rate_limit
def test_rate_limit_nonexistant_task(self):
self.panel.handle('rate_limit', arguments={
'task_name': 'xxxx.does.not.exist',
'rate_limit': '1000/s'})
def test_unexposed_command(self):
with self.assertRaises(KeyError):
self.panel.handle('foo', arguments={})
def test_revoke_with_name(self):
tid = uuid()
m = {'method': 'revoke',
'destination': hostname,
'arguments': {'task_id': tid,
'task_name': mytask.name}}
self.panel.handle_message(m, None)
self.assertIn(tid, revoked)
def test_revoke_with_name_not_in_registry(self):
tid = uuid()
m = {'method': 'revoke',
'destination': hostname,
'arguments': {'task_id': tid,
'task_name': 'xxxxxxxxx33333333388888'}}
self.panel.handle_message(m, None)
self.assertIn(tid, revoked)
def test_revoke(self):
tid = uuid()
m = {'method': 'revoke',
'destination': hostname,
'arguments': {'task_id': tid}}
self.panel.handle_message(m, None)
self.assertIn(tid, revoked)
m = {'method': 'revoke',
'destination': 'does.not.exist',
'arguments': {'task_id': tid + 'xxx'}}
self.panel.handle_message(m, None)
self.assertNotIn(tid + 'xxx', revoked)
def test_revoke_terminate(self):
request = Mock()
request.id = tid = uuid()
state.reserved_requests.add(request)
try:
r = control.revoke(Mock(), tid, terminate=True)
self.assertIn(tid, revoked)
self.assertTrue(request.terminate.call_count)
self.assertIn('terminating', r['ok'])
# unknown task id only revokes
r = control.revoke(Mock(), uuid(), terminate=True)
self.assertIn('not found', r['ok'])
finally:
state.reserved_requests.discard(request)
def test_autoscale(self):
self.panel.state.consumer = Mock()
self.panel.state.consumer.controller = Mock()
sc = self.panel.state.consumer.controller.autoscaler = Mock()
sc.update.return_value = 10, 2
m = {'method': 'autoscale',
'destination': hostname,
'arguments': {'max': '10', 'min': '2'}}
r = self.panel.handle_message(m, None)
self.assertIn('ok', r)
self.panel.state.consumer.controller.autoscaler = None
r = self.panel.handle_message(m, None)
self.assertIn('error', r)
def test_ping(self):
m = {'method': 'ping',
'destination': hostname}
r = self.panel.handle_message(m, None)
self.assertEqual(r, 'pong')
def test_shutdown(self):
m = {'method': 'shutdown',
'destination': hostname}
with self.assertRaises(SystemExit):
self.panel.handle_message(m, None)
def test_panel_reply(self):
replies = []
class _Node(pidbox.Node):
def reply(self, data, exchange, routing_key, **kwargs):
replies.append(data)
panel = _Node(hostname=hostname,
state=self.create_state(consumer=Consumer()),
handlers=Panel.data,
mailbox=self.app.control.mailbox)
r = panel.dispatch('ping', reply_to={'exchange': 'x',
'routing_key': 'x'})
self.assertEqual(r, 'pong')
self.assertDictEqual(replies[0], {panel.hostname: 'pong'})
def test_pool_restart(self):
consumer = Consumer()
consumer.controller = _WC(app=current_app)
consumer.controller.pool.restart = Mock()
panel = self.create_panel(consumer=consumer)
panel.app = self.app
_import = panel.app.loader.import_from_cwd = Mock()
_reload = Mock()
panel.handle('pool_restart', {'reloader': _reload})
self.assertTrue(consumer.controller.pool.restart.called)
self.assertFalse(_reload.called)
self.assertFalse(_import.called)
def test_pool_restart_import_modules(self):
consumer = Consumer()
consumer.controller = _WC(app=current_app)
consumer.controller.pool.restart = Mock()
panel = self.create_panel(consumer=consumer)
panel.app = self.app
_import = consumer.controller.app.loader.import_from_cwd = Mock()
_reload = Mock()
panel.handle('pool_restart', {'modules': ['foo', 'bar'],
'reloader': _reload})
self.assertTrue(consumer.controller.pool.restart.called)
self.assertFalse(_reload.called)
self.assertEqual(
[(('foo',), {}), (('bar',), {})],
_import.call_args_list,
)
def test_pool_restart_relaod_modules(self):
consumer = Consumer()
consumer.controller = _WC(app=current_app)
consumer.controller.pool.restart = Mock()
panel = self.create_panel(consumer=consumer)
panel.app = self.app
_import = panel.app.loader.import_from_cwd = Mock()
_reload = Mock()
with patch.dict(sys.modules, {'foo': None}):
panel.handle('pool_restart', {'modules': ['foo'],
'reload': False,
'reloader': _reload})
self.assertTrue(consumer.controller.pool.restart.called)
self.assertFalse(_reload.called)
self.assertFalse(_import.called)
_import.reset_mock()
_reload.reset_mock()
consumer.controller.pool.restart.reset_mock()
panel.handle('pool_restart', {'modules': ['foo'],
'reload': True,
'reloader': _reload})
self.assertTrue(consumer.controller.pool.restart.called)
self.assertTrue(_reload.called)
self.assertFalse(_import.called)
|
|
# -*- coding: utf-8 -*-
import datetime
import constants
# Choose and configure the browser of your choice
def get_browser():
return webdriver.Chrome()
# The host and port where the tested ap shoud listen.
HOST = '127.0.0.1'
PORT = 8080
# The host alias set in the /etc/hosts file.
# The actual tests will navigate selenium browser to this host.
# This is necessary because some providers don't support localhost as the
# callback url.
HOST_ALIAS = 'authomatic.com'
# Only providers included here will be tested.
# This is a convenience to easily exclude providers from tests by commenting
# them out.
INCLUDE_PROVIDERS = [
'behance',
'bitly',
'deviantart',
'facebook',
'foursquare',
'google',
'github',
'linkedin',
'paypal',
'reddit',
'vk',
'windowslive',
'yammer',
'yandex',
]
# Use these constants if you have the same user info by all tested providers.
EMAIL = 'andy.pipkin@littlebritain.co.uk'
FIRST_NAME = 'Andy'
LAST_NAME = 'Pipkin'
NAME = FIRST_NAME + ' ' + LAST_NAME
USERNAME = 'andypipkin'
USERNAME_REVERSE = 'pipkinandy'
NICKNAME = 'Mr. Pipkin'
BIRTH_YEAR = '1979'
BIRTH_DATE = str(datetime.datetime(1979, 12, 31))
CITY = 'London'
COUNTRY = 'Great Britain'
POSTAL_CODE = 'EC1A1DH'
PHONE = '??????????'
PHONE_INTERNATIONAL = '0044??????????'
GENDER = constants.GENDER_MALE
LOCALE = 'en_UK'
# Common values for all providers
COMMON = {
# Could be same if the user sets it so
'user_birth_date': BIRTH_DATE,
'user_login': EMAIL,
'user_email': EMAIL,
'user_first_name': FIRST_NAME,
'user_last_name': LAST_NAME,
'user_name': NAME,
'user_username': USERNAME,
'user_username_reverse': USERNAME_REVERSE,
'user_nickname': NICKNAME,
'user_birth_year': BIRTH_YEAR,
'user_city': CITY,
'user_country': COUNTRY,
'user_gender': GENDER,
'user_phone': PHONE,
'user_postal_code': POSTAL_CODE,
'user_locale': LOCALE,
# It is not a good idea to have the same password for all providers
# 'user_password': '##########',
# Provider and user specific value
# 'user_id': '',
# 'user_locale': None,
# 'user_timezone': None,
# Provider specific format
# 'user_picture': '',
# 'user_link': '',
# Provider specific value
# 'consumer_key': '',
# 'consumer_secret': '',
}
# Values from COMMON will be overriden by values from PROVIDERS[provider_name]
# if set.
PROVIDERS = {
'behance': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'bitly': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'deviantart': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
},
'facebook': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
# This value changes when switching from and to Daylight Saving Time
'user_timezone': '??????????',
},
'foursquare': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
# The picture URL is a random CDN URL
'user_picture': '??????????',
},
'google': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
'user_locale': '??????????',
# The picture URL is a random CDN URL
'user_picture': '??????????',
},
'github': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
# GitHub requires the User-Agent header in every request.
'access_headers': {'User-Agent': ('Authomatic.py Automated Functional '
'Tests')},
},
'linkedin': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
# User link contains a slug derived from the username.
'user_link': 'http://www.linkedin.com/in/??????????',
# GitHub requires the User-Agent header in every request.
'user_picture': '??????????',
'user_phone': PHONE_INTERNATIONAL,
},
'paypal': {
'consumer_key': '##########',
'consumer_secret': '##########',
},
'reddit': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_login': USERNAME,
'user_id': '??????????',
'access_headers': {'User-Agent': ('Authomatic.py Automated Functional '
'Tests')}
},
# Viadeo doesn't support access to its API
# http://dev.viadeo.com/documentation/authentication/request-an-api-key/
# 'viadeo': {
# 'consumer_key': '##########',
# 'consumer_secret': '##########',
# },
'vk': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
# City and country are numeric IDs
'user_city': '??????????',
'user_country': '??????????',
'user_gender': '2',
'user_timezone': '1',
},
'windowslive': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
},
'yammer': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_id': '??????????',
'user_picture': ('https://mug0.assets-yammer.com/mugshot/images/48x48/'
'??????????'),
'user_timezone': '??????????',
'user_locale': '??????????',
},
'yandex': {
'consumer_key': '##########',
'consumer_secret': '##########',
'user_password': '##########',
'user_login': USERNAME,
'user_id': '??????????',
},
}
|
|
import sys
import os
import resource
import shutil
import shlex
import time
import subprocess
import random
# this is a disk I/O benchmark script. It runs menchmarks
# over different filesystems, different cache sizes and
# different number of peers (can be used to find a reasonable
# range for unchoke slots).
# it also measures performance improvements of re-ordering
# read requests based on physical location and OS hints
# like posix_fadvice(FADV_WILLNEED). It can also be used
# for the AIO branch to measure improvements over the
# classic thread based disk I/O
# to set up the test, build the example directory in release
# with statistics=on and copy fragmentation_test, client_test
# and connection_tester to a directory called 'stage' (or make
# a symbolic link to the bjam output directory).
# make sure gnuplot is installed.
# the following lists define the space tests will be run in
# variables to test. All these are run on the first
# entry in the filesystem list.
cache_sizes = [0, 512, 1024, 2048, 4096, 8192, 16384]
peers = [10, 100, 500, 1000, 2000]
# the drives are assumed to be mounted under ./<name>
# or have symbolic links to them.
filesystem = ['ext4', 'ext3', 'reiser', 'xfs']
# the number of peers for the filesystem test. The
# idea is to stress test the filesystem by using a lot
# of peers, since each peer essentially is a separate
# read location on the platter
filesystem_peers = 200
# the amount of cache for the filesystem test
filesystem_cache = 8192
# the number of seconds to run each test. It's important that
# this is shorter than what it takes to finish downloading
# the test torrent, since then the average rate will not
# be representative of the peak anymore
test_duration = 100
# make sure the environment is properly set up
if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 4000:
print 'please set ulimit -n to at least 4000'
sys.exit(1)
# make sure we have all the binaries available
binaries = ['stage/client_test', 'stage/connection_tester', 'stage/fragmentation_test']
for i in binaries:
if not os.path.exists(i):
print 'make sure "%s" is available in current working directory' % i
sys.exit(1)
for i in filesystem:
if not os.path.exists(i):
print ('the path "%s" does not exist. This is directory/mountpoint is ' +
'used as the download directory and is the filesystem that will be benchmarked ' +
'and need to exist.') % i
sys.exit(1)
# make sure we have a test torrent
if not os.path.exists('test.torrent'):
print 'generating test torrent'
os.system('./stage/connection_tester gen-torrent test.torrent')
# use a new port for each test to make sure they keep working
# this port is incremented for each test run
port = 10000 + random.randint(0, 5000)
def build_commandline(config, port):
num_peers = config['num-peers']
no_disk_reorder = '';
if config['allow-disk-reorder'] == False:
no_disk_reorder = '-O'
no_read_ahead = ''
if config['read-ahead'] == False:
no_read_ahead = '-j'
allocation_mode = config['allocation-mode']
global test_duration
return './stage/client_test -k -z -N -h -H -M -B %d -l %d -S %d -T %d -c %d -C %d -s "%s" %s %s -q %d -p %d -f session_stats/alerts_log.txt -a %s test.torrent' \
% (test_duration, num_peers, num_peers, num_peers, num_peers, config['cache-size'], config['save-path'] \
, no_disk_reorder, no_read_ahead, test_duration, port, config['allocation-mode'])
def delete_files(files):
for i in files:
try: os.remove(i)
except:
try: shutil.rmtree(i)
except: pass
def build_test_config(fs, num_peers, cache_size, readahead=True, reorder=True, preallocate=False):
config = {'test': 'dual', 'save-path': os.path.join('./', fs), 'num-peers': num_peers, 'allow-disk-reorder': reorder, 'cache-size': cache_size, 'read-ahead': readahead}
if preallocate: config['allocation-mode'] = 'allocate'
else: config['allocation-mode'] = 'sparse'
return config
def build_target_folder(config):
reorder = 'reorder'
if config['allow-disk-reorder'] == False: reorder = 'no-reorder'
readahead = 'readahead'
if config['read-ahead'] == False: readahead = 'no-readahead'
return 'results_%d_%d_%s_%s_%s_%s_%s' % (config['num-peers'], config['cache-size'], os.path.split(config['save-path'])[1], config['test'], reorder, readahead, config['allocation-mode'])
def run_test(config):
if os.path.exists(build_target_folder(config)):
print 'results already exists, skipping test'
return
# make sure any previous test file is removed
delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', os.path.join(config['save-path'], '.resume'), '.dht_state', 'session_stats'])
try: os.mkdir('session_stats')
except: pass
# save off the command line for reference
global port
cmdline = build_commandline(config, port)
f = open('session_stats/cmdline.txt', 'w+')
f.write(cmdline)
f.close()
f = open('session_stats/config.txt', 'w+')
print >>f, config
f.close()
print '\n\n*********************************'
print '* RUNNING TEST *'
print '*********************************\n\n'
client_output = open('session_stats/client.output', 'w+')
print 'launching: %s' % cmdline
client = subprocess.Popen(shlex.split(cmdline), stdout=client_output, stdin=subprocess.PIPE)
# enable disk stats printing
print >>client.stdin, 'x',
time.sleep(1)
cmdline = './stage/connection_tester %s %d 127.0.0.1 %d test.torrent' % (config['test'], config['num-peers'], port)
print 'launching: %s' % cmdline
tester_output = open('session_stats/tester.output', 'w+')
tester = subprocess.Popen(shlex.split(cmdline), stdout=tester_output)
tester.wait()
client.wait()
tester_output.close()
client_output.close()
if tester.returncode != 0: sys.exit(tester.returncode)
if client.returncode != 0: sys.exit(client.returncode)
# run fragmentation test
print 'analyzing fragmentation'
os.system('./stage/fragmentation_test test.torrent %s' % config['save-path'])
shutil.copy('fragmentation.log', 'session_stats/')
shutil.copy('fragmentation.png', 'session_stats/')
shutil.copy('fragmentation.gnuplot', 'session_stats/')
os.chdir('session_stats')
# parse session stats
print 'parsing session log'
os.system('python ../../parse_session_stats.py *.0000.log')
os.chdir('..')
# move the results into its final place
print 'saving results'
os.rename('session_stats', build_target_folder(config))
# clean up
print 'cleaning up'
delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', os.path.join(config['save-path'], '.resume'), '.dht_state'])
port += 1
for fs in filesystem:
for preallocate in [True, False]:
config = build_test_config(fs, filesystem_peers, filesystem_cache, preallocate)
run_test(config)
for c in cache_sizes:
for p in peers:
for rdahead in [True, False]:
# for reorder in [True, False]:
reorder = True
for preallocate in [True, False]:
config = build_test_config(filesystem[0], p, c, rdahead, reorder, preallocate)
run_test(config)
|
|
from .fixtures import *
def test_regex_operator(backend, small_test_data):
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
marlon_wayans = Actor({'name': 'Marlon Wayans'})
backend.save(marlon_brando)
backend.save(marlon_wayans)
backend.commit()
assert backend.get(Actor, {'name': {'$regex': r'^Marlon\s+(?!Wayans)[\w]+$'}}) == marlon_brando
assert len(backend.filter(Actor, {'name': {'$regex': r'^Marlon\s+.*$'}})) == 2
assert len(backend.filter(Actor, {'name': {'$regex': r'^.*\s+Brando$'}})) == 1
def test_in(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with empty list
query = {'name': {'$not': {'$in': []}}}
assert len(backend.filter(Actor, query)) == len([david_hasselhoff, charlie_chaplin, marlon_brando, leonardo_di_caprio])
query = {'name': {'$in': []}}
assert len(backend.filter(Actor, query)) == len([])
# Test with empty list
# Test with one match
query = {'name': {'$in': [david_hasselhoff.name]}}
assert len(backend.filter(Actor, query)) == len([david_hasselhoff])
# Test with one match
# Test with unknown elements
query = {'name': {'$in': ['jackie chan']}}
assert len(backend.filter(Actor, query)) == len([])
# Test with unknown elements
# Test with different types
query = {'name': {'$in': [david_hasselhoff.name, True]}}
assert len(backend.filter(Actor, query)) == len([david_hasselhoff])
# Test with different types
def test_lt(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with String
query = {'name': {'$lt': marlon_brando.name}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin, david_hasselhoff, leonardo_di_caprio])
# Test with String
# Test with float/int
query = {'gross_income_m': {'$lt': marlon_brando.appearances}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, charlie_chaplin, leonardo_di_caprio, david_hasselhoff])
# Test with float/int
# Test with normal conditions
query = {'appearances': {'$lt': david_hasselhoff.appearances}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, leonardo_di_caprio])
# Test with normal conditions
# Test with normal conditions
query = {'gross_income_m': {'$lt': david_hasselhoff.gross_income_m}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, charlie_chaplin])
# Test with normal conditions
def test_gt(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with String
query = {'name': {'$gt': marlon_brando.name}}
assert len(backend.filter(Actor, query)) == len([])
# Test with String
# Test with float/int
query = {'gross_income_m': {'$gt': marlon_brando.appearances}}
assert len(backend.filter(Actor, query)) == len([])
# Test with float/int
# Test with normal conditions
query = {'appearances': {'$gt': david_hasselhoff.appearances}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test with normal conditions
# Test with normal conditions
query = {'gross_income_m': {'$gt': marlon_brando.gross_income_m}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin, david_hasselhoff])
# Test with normal conditions
def test_gte(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with String
query = {'name': {'$gte': marlon_brando.name}}
assert len(backend.filter(Actor, query)) == len([marlon_brando])
# Test with String
# Test with float/int
query = {'gross_income_m': {'$gte': marlon_brando.appearances}}
assert len(backend.filter(Actor, query)) == len([])
# Test with float/int
# Test with normal conditions
query = {'appearances': {'$gte': david_hasselhoff.appearances}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin, marlon_brando])
# Test with normal conditions
# Test with normal conditions
query = {'gross_income_m': {'$gte': marlon_brando.gross_income_m}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, charlie_chaplin, david_hasselhoff])
# Test with normal conditions
def test_lte(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with String
query = {'name': {'$lte': marlon_brando.name}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin, marlon_brando, leonardo_di_caprio, david_hasselhoff])
# Test with String
# Test with float/int
query = {'gross_income_m': {'$lte': marlon_brando.appearances}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, charlie_chaplin, leonardo_di_caprio, david_hasselhoff])
# Test with float/int
# Test with normal conditions
query = {'appearances': {'$lte': david_hasselhoff.appearances}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, leonardo_di_caprio, david_hasselhoff])
# Test with normal conditions
# Test with normal conditions
query = {'gross_income_m': {'$lte': david_hasselhoff.gross_income_m}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, charlie_chaplin, david_hasselhoff, leonardo_di_caprio])
# Test with normal conditions
def test_exists(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924,'foo' : True})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends','foo' : 'bar', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with normal conditions
query = {'foo': {'$exists': True}}
assert len(backend.filter(Actor, query)) == 2
assert all([actor in backend.filter(Actor, query) for actor in [marlon_brando,leonardo_di_caprio]])
query = {'foo': {'$exists': False}}
assert all([actor in backend.filter(Actor, query) for actor in [david_hasselhoff,charlie_chaplin]])
def test_all(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': [1.453, 1.0, 12.0], 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': [12.453, 1.0, 12.0], 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': [12.453, 1.0, 4.0], 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': [0.371, 1.0, 99.0], 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with normal conditions
query = {'name': {'$all': [charlie_chaplin.name]}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test with normal conditions
# Test with empty list
query = {'name': {'$all': []}}
actors = backend.filter(Actor, query)
assert len(backend.filter(Actor, query)) == len([])
# Test with empty list
# Test with no result
query = {'name': {'$all': ['jackie chan']}}
actors = backend.filter(Actor, query)
assert len(backend.filter(Actor, query)) == len([])
# Test with no result
# Test with int
query = {'appearances': {'$all': [78]}}
assert len(backend.filter(Actor, query)) == len([marlon_brando])
# Test with int
# Test with float
query = {'gross_income_m': {'$all': [1.0, 1.453]}}
assert len(backend.filter(Actor, query)) == len([marlon_brando])
# Test with float
# Test with full result
query = {'gross_income_m': {'$all': [1.0]}}
assert len(backend.filter(Actor, query)) == len([marlon_brando, charlie_chaplin, david_hasselhoff, leonardo_di_caprio])
# Test with full result
# Test with boolean list
query = {'is_funny': {'$all': [True]}}
assert len(backend.filter(Actor, query)) == len([david_hasselhoff, charlie_chaplin])
# Test with boolean list
# Test with mixed values/list
query = {'is_funny': {'$all': ['it depends', marlon_brando.name, leonardo_di_caprio.appearances, charlie_chaplin.gross_income_m]}}
assert len(backend.filter(Actor, query)) == len([])
# Test with mixed values/list
# Test with crossed type
query = {'name': {'$all': [True]}}
assert len(backend.filter(Actor, query)) == len([])
# Test with crossed type
# Test with unknown attribute
query = {'named': {'$all': [marlon_brando.name]}}
assert len(backend.filter(Actor, query)) == len([])
# Test with unknown attribute
def test_ne(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with normal conditions
query = {'name': {'$ne': charlie_chaplin.name}}
assert len(backend.filter(Actor, query)) == 3
# Test with normal conditions
# Test with no result
query = {'name': {'$ne': 'jackie chan'}}
assert len(backend.filter(Actor, query)) == 4
# Test with no result
# Test with int
query = {'appearances': {'$ne': 78}}
assert len(backend.filter(Actor, query)) == 3
# Test with int
# Test with float/full results
query = {'gross_income_m': {'$ne': 0.0}}
assert len(backend.filter(Actor, query)) == 4
# Test with float/full results
# Test with boolean
query = {'is_funny': {'$ne': True}}
assert len(backend.filter(Actor, query)) == 2
# Test with boolean
# Test with boolean/string
query = {'is_funny': {'$ne': 'it depends'}}
assert len(backend.filter(Actor, query)) == 3
# Test with boolean/string
# Test with crossed type
query = {'appearances': {'$ne': True}}
assert len(backend.filter(Actor, query)) == 4
# Test with crossed type
def test_and(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with normal conditions
query = {'$and': [{'name': charlie_chaplin.name}, {'birth_year': 1889}]}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test with normal conditions
# Test with no results
query = {'$and': [{'name': charlie_chaplin.name}, {'birth_year': 1924}, {'is_funny': 'it depends'}, {'gross_income_m': '12.453'}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with no results
# Test repeating request
query = {'$and': [{'name': charlie_chaplin.name}, {'name': charlie_chaplin.name}, {'name': charlie_chaplin.name}, {'name': charlie_chaplin.name}]}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test repeating request
# Test with no result
query = {'$and': [{'name': charlie_chaplin.name}, {'birth_year': {'$lt': 1889}}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with no result
# Test with no result
query = {'$and': [{'appearances': 473}, {'birth_year': {'$lt': 1879}}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with no result
# Test with crossed type
query = {'$and': [{'name': charlie_chaplin.appearances}, {'birth_year': 'may be'}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with crossed type
# Test with unknown attribute
query = {'$and': [{'named': charlie_chaplin.name}, {'birth_year': 1889}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with unknown attribute
def test_or(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with normal conditions
query = {'$or': [{'name': charlie_chaplin.name}, {'birth_year': 1889}]}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test with normal conditions
# Test with full results
query = {'$or': [{'name': charlie_chaplin.name}, {'birth_year': 1924}, {'is_funny': 'it depends'}, {'gross_income_m': 12.453}]}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin, marlon_brando, leonardo_di_caprio, david_hasselhoff])
# Test with full results
# Test repeating request
query = {'$or': [{'name': charlie_chaplin.name}, {'name': charlie_chaplin.name}, {'name': charlie_chaplin.name}, {'name': charlie_chaplin.name}]}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test repeating request
# Test with no result
query = {'$or': [{'name': 'Marlon not Brando'}, {'appearances': 4224}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with no result
# Test with crossed type
query = {'$or': [{'name': charlie_chaplin.appearances}, {'birth_year': 'may be'}]}
assert len(backend.filter(Actor, query)) == len([])
# Test with crossed type
# Test with unknown attribute
query = {'$or': [{'named': charlie_chaplin.name}, {'birth_year': 1889}]}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin])
# Test with unknown attribute
def test_regex(backend):
# DB setup
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
# DB setup
# Test with normal conditions
query = {'name': {'$regex': 'Mar.*do'}}
assert len(backend.filter(Actor, query)) == len([marlon_brando])
# Test with normal conditions
# Test with full results
query = {'name': {'$regex': '/*'}}
assert len(backend.filter(Actor, query)) == len([charlie_chaplin, marlon_brando, leonardo_di_caprio, david_hasselhoff])
# Test with full results
# Test repeating request
query = {'$and': [{'name': {'$regex': r'^.*\s+Brando'}}, {'name': {'$regex': r'^.*\s+Brando'}}, {'name': {'$regex': r'^.*\s+Brando'}}, {'name': {'$regex': r'^.*\s+Brando'}}]}
assert len(backend.filter(Actor, query)) == len([marlon_brando])
# Test repeating request
# Test with no result
query = {'name': {'$regex': r'^test@test.com'}}
assert len(backend.filter(Actor, query)) == len([])
# Test with no result
# Test with crossed type
query = {'gross_income_m': {'$regex': r'^Marlon\s+.*$'}}
assert len(backend.filter(Actor, query)) == len([])
# Test with crossed type
# Test with unknown attribute
query = {'gross_income_bad': {'$regex': r'^Marlon\s+.*$'}}
assert len(backend.filter(Actor, query)) == len([])
# Test with unknown attribute
|
|
"""Test the Honeywell Lyric config flow."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.http import CONF_BASE_URL, DOMAIN as DOMAIN_HTTP
from homeassistant.components.lyric import config_flow
from homeassistant.components.lyric.const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
@pytest.fixture()
async def mock_impl(hass):
"""Mock implementation."""
await setup.async_setup_component(hass, "http", {})
impl = config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
CLIENT_ID,
CLIENT_SECRET,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
config_flow.OAuth2FlowHandler.async_register_implementation(hass, impl)
return impl
async def test_abort_if_no_configuration(hass):
"""Check flow abort when no configuration."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
async def test_full_flow(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
},
DOMAIN_HTTP: {CONF_BASE_URL: "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
)
client = await hass_client_no_auth()
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.lyric.api.ConfigEntryLyricClient"), patch(
"homeassistant.components.lyric.async_setup_entry", return_value=True
) as mock_setup:
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["data"]["auth_implementation"] == DOMAIN
result["data"]["token"].pop("expires_at")
assert result["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
assert DOMAIN in hass.config.components
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert entry.state is config_entries.ConfigEntryState.LOADED
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
async def test_abort_if_authorization_timeout(
hass, mock_impl, current_request_with_host
):
"""Check Somfy authorization timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow = config_flow.OAuth2FlowHandler()
flow.hass = hass
with patch.object(
mock_impl, "async_generate_authorize_url", side_effect=asyncio.TimeoutError
):
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "authorize_url_timeout"
async def test_reauthentication_flow(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Test reauthentication flow."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
},
DOMAIN_HTTP: {CONF_BASE_URL: "https://example.com"},
},
)
old_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=DOMAIN,
version=1,
data={"id": "timmo", "auth_implementation": DOMAIN},
)
old_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=old_entry.data
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
client = await hass_client_no_auth()
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.lyric.api.ConfigEntryLyricClient"):
with patch(
"homeassistant.components.lyric.async_setup_entry", return_value=True
) as mock_setup:
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert len(mock_setup.mock_calls) == 1
|
|
# mininode.py - Bitcoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import binascii
import time
import sys
import random
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
BIP0031_VERSION = 60000
MY_VERSION = 60001 # past bip-31 for ping/pong
MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000L # 1 btc in satoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000L:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = ""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = ""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(binascii.unhexlify(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return binascii.hexlify(obj.serialize()).decode('utf-8')
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = "\x00" * 10 + "\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig="", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), binascii.hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
binascii.hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = ""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = ""
self.strStatusBar = ""
self.strReserved = ""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = ""
self.vchSig = ""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = ""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = "version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = "verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = "addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = "alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = ""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = "inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = "getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = "getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = "tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = "block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = "getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = "ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = "ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = "pong"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = "mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = "sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = "getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = "headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = "reject"
def __init__(self):
self.message = ""
self.code = 0
self.reason = ""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.message == "block" or self.message == "tx"):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.message == "block" or self.message == "tx"):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
class msg_feefilter(object):
command = "feefilter"
def __init__(self, feerate=0L):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command)(conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_feefilter(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping,
"pong": msg_pong,
"headers": msg_headers,
"getheaders": msg_getheaders,
"reject": msg_reject,
"mempool": msg_mempool,
"feefilter": msg_feefilter
}
MAGIC_BYTES = {
"mainnet": "\xf9\xbe\xb4\xd9", # mainnet
"testnet3": "\x0b\x11\x09\x07", # testnet3
"regtest": "\xfa\xbf\xb5\xda" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print 'got_data:', repr(e)
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == "version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap['ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap['ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Waymo Open Dataset. See waymo.com/open."""
import os
from absl import logging
import tensorflow as tf
from tensorflow_datasets.proto import waymo_dataset_pb2 as open_dataset
import tensorflow_datasets.public_api as tfds
_CITATION = """
@InProceedings{Sun_2020_CVPR,
author = {Sun, Pei and Kretzschmar, Henrik and Dotiwalla, Xerxes and Chouard, Aurelien and Patnaik, Vijaysai and Tsui, Paul and Guo, James and Zhou, Yin and Chai, Yuning and Caine, Benjamin and Vasudevan, Vijay and Han, Wei and Ngiam, Jiquan and Zhao, Hang and Timofeev, Aleksei and Ettinger, Scott and Krivokon, Maxim and Gao, Amy and Joshi, Aditya and Zhang, Yu and Shlens, Jonathon and Chen, Zhifeng and Anguelov, Dragomir},
title = {Scalability in Perception for Autonomous Driving: Waymo Open Dataset},
booktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2020}
}
"""
_DESCRIPTION = """\
The Waymo Open Dataset is comprised of high resolution sensor data
collected by Waymo self-driving cars in a wide variety of conditions.
This data is licensed for non-commercial use.
WARNING: this dataset requires additional authorization and registration.
Please look at tfds documentation for accessing GCS, and
afterwards, please register via https://waymo.com/open/licensing/
"""
_GCS_DESCRIPTION = """
This dataset is also available in pre-processed format, making it faster
to load, if you select the correct data_dir:
```
tfds.load('waymo_open_dataset/{}', \
data_dir='gs://waymo_open_dataset_{}_individual_files/tensorflow_datasets')
```
"""
_HOMEPAGE_URL = "http://www.waymo.com/open/"
_OBJECT_LABELS = [
"TYPE_UNKNOWN", "TYPE_VEHICLE", "TYPE_PEDESTRIAN", "TYPE_SIGN",
"TYPE_CYCLIST"
]
class WaymoOpenDatasetConfig(tfds.core.BuilderConfig):
"""BuilderConfig for Waymo Open Dataset Config."""
def __init__(self,
*,
name,
version_str,
description,
is_on_gcs=False,
**kwargs):
"""BuilderConfig for Waymo Open Dataset examples.
Args:
name: Config name
version_str: Version string (e.g. `v_1_2_0`).
description: Description
is_on_gcs: Whether the dataset is availabe preprocessed on GCS
**kwargs: keyword arguments forwarded to super.
"""
if is_on_gcs:
description = description + _GCS_DESCRIPTION.format(name, version_str)
super(WaymoOpenDatasetConfig, self).__init__(
name=name,
version=tfds.core.Version("0.2.0"),
description=description,
**kwargs)
self.cloud_bucket = tfds.core.Path(
f"gs://waymo_open_dataset_{version_str}_individual_files/")
class WaymoOpenDataset(tfds.core.BeamBasedBuilder):
"""Waymo Open Dataset."""
BUILDER_CONFIGS = [
WaymoOpenDatasetConfig(
name="v1.2",
version_str="v_1_2_0",
description="Waymo Open Dataset v1.2",
),
WaymoOpenDatasetConfig(
name="v1.1",
version_str="v_1_1_0",
description="Waymo Open Dataset v1.1",
),
WaymoOpenDatasetConfig(
name="v1.0",
version_str="v_1_0_0",
description="Waymo Open Dataset v1.0",
is_on_gcs=True,
),
]
def _info(self) -> tfds.core.DatasetInfo:
# Annotation descriptions are in the object development kit.
annotations = {
"type": tfds.features.ClassLabel(names=_OBJECT_LABELS),
"bbox": tfds.features.BBoxFeature(),
}
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"context": {
"name": tfds.features.Text()
},
"timestamp_micros": tf.int64,
"camera_FRONT": {
"image":
tfds.features.Image(
shape=(1280, 1920, 3), encoding_format="jpeg"),
"labels":
tfds.features.Sequence(annotations)
},
"camera_FRONT_LEFT": {
"image":
tfds.features.Image(
shape=(1280, 1920, 3), encoding_format="jpeg"),
"labels":
tfds.features.Sequence(annotations)
},
"camera_SIDE_LEFT": {
"image":
tfds.features.Image(
shape=(886, 1920, 3), encoding_format="jpeg"),
"labels":
tfds.features.Sequence(annotations)
},
"camera_FRONT_RIGHT": {
"image":
tfds.features.Image(
shape=(1280, 1920, 3), encoding_format="jpeg"),
"labels":
tfds.features.Sequence(annotations)
},
"camera_SIDE_RIGHT": {
"image":
tfds.features.Image(
shape=(886, 1920, 3), encoding_format="jpeg"),
"labels":
tfds.features.Sequence(annotations)
},
}),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns the SplitGenerators.
Args:
dl_manager: Download manager object.
Returns:
SplitGenerators.
"""
# Training set
train_files = tf.io.gfile.glob(
os.path.join(self.builder_config.cloud_bucket,
"training/segment*camera*"))
logging.info("Train files: %s", train_files)
# Validation set
validation_files = tf.io.gfile.glob(
os.path.join(self.builder_config.cloud_bucket,
"validation/segment*camera*"))
logging.info("Validation files: %s", validation_files)
split_generators = [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"tf_record_files": train_files,
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"tf_record_files": validation_files,
},
),
]
# Testing set (Only available in Waymo Open Dataset v1.2)
if self.builder_config.name == "v_1_2":
test_files = tf.io.gfile.glob(
os.path.join(self.builder_config.cloud_bucket,
"testing/segment*camera*"))
logging.info("Testing files: %s", test_files)
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"tf_record_files": test_files,
},
))
return split_generators
def _build_pcollection(self, pipeline, tf_record_files):
"""Generate examples as dicts.
Args:
pipeline: Apache Beam pipeline.
tf_record_files: .tfrecord files.
Returns:
Dict of examples.
"""
beam = tfds.core.lazy_imports.apache_beam
def _process_example(tf_record_file):
for image_and_annotation in _generate_images_and_annotations(
tf_record_file):
key = "%s:%s" % (image_and_annotation["context"]["name"],
image_and_annotation["timestamp_micros"])
yield key, image_and_annotation
return (pipeline
| beam.Create(tf_record_files)
| beam.FlatMap(_process_example))
def _generate_images_and_annotations(tf_record_file):
"""Yields the images and annotations from a given file.
Args:
tf_record_file: .tfrecord files.
Yields:
Waymo images and annotations.
"""
# Go through all frames
dataset = tf.data.TFRecordDataset(tf_record_file, compression_type="")
for data in tfds.as_numpy(dataset):
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data)) # pytype: disable=wrong-arg-types
image_and_annotation = {
"context": {
"name": frame.context.name
},
"timestamp_micros": frame.timestamp_micros
}
camera_calibration = {
calibration.name: calibration
for calibration in frame.context.camera_calibrations
}
camera_labels = {label.name: label for label in frame.camera_labels}
# Go through all 5 camera images in the frame
for frame_image in frame.images:
labels = None
if frame_image.name in camera_labels:
image_height = camera_calibration[frame_image.name].height
image_width = camera_calibration[frame_image.name].width
labels = _convert_labels(camera_labels[frame_image.name], image_width,
image_height)
camera_name = open_dataset.CameraName.Name.Name(frame_image.name)
image_and_annotation["camera_" + camera_name] = {
"image": frame_image.image,
"labels": labels
}
yield image_and_annotation
def _convert_labels(raw_labels, image_width, image_height):
"""Convert labels to bounding boxes.
Args:
raw_labels: Raw label data.
image_width: Width of the Waymo images.
image_height: Height of the Waymo images.
Returns:
List of dicts with the label type and the corresponding bounding boxes.
"""
return [
{ # pylint: disable=g-complex-comprehension
"type": raw_label.type,
"bbox": _build_bounding_box(raw_label.box, image_width, image_height)
} for raw_label in raw_labels.labels
]
def _build_bounding_box(open_dataset_box, image_width, image_height):
"""Builds and returns TFDS bounding box.
Args:
open_dataset_box: Bounding box center x,y coordinates and its length, width.
image_width: Width of the Waymo images.
image_height: Height of the Waymo images.
Returns:
tfds.features.BBox.
"""
center_x = open_dataset_box.center_x
center_y = open_dataset_box.center_y
length = open_dataset_box.length
width = open_dataset_box.width
return tfds.features.BBox(
ymin=max((center_y - (width / 2)) / image_height, 0.0),
ymax=min((center_y + (width / 2)) / image_height, 1.0),
xmin=max((center_x - (length / 2)) / image_width, 0.0),
xmax=min((center_x + (length / 2)) / image_width, 1.0),
)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Sebastian Wiesner <lunaryorn@googlemail.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
synaptiks.kde.widgets.touchpad
==============================
Widgets for touchpad information and configuration
.. moduleauthor:: Sebastian Wiesner <lunaryorn@googlemail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from PyQt4.QtCore import pyqtSignal, QRegExp
from PyQt4.QtGui import QWidget
from PyKDE4.kdecore import i18nc
from PyKDE4.kdeui import KTabWidget, KIconLoader, KComboBox
from synaptiks.kde.widgets import DynamicUserInterfaceMixin
from synaptiks.kde.widgets.config import ConfigurationWidgetMixin
class TouchpadInformationWidget(QWidget, DynamicUserInterfaceMixin):
"""
A widget which shows some information about a touchpad.
This currently includes:
- the device name of the touchpad,
- what physical buttons, the touchpad has,
- how many fingers it can detect,
- and whether it supports two-finger emulation.
"""
def __init__(self, parent=None):
"""
Create a new information widget.
"""
QWidget.__init__(self, parent)
self._load_userinterface()
def show_touchpad(self, touchpad):
"""
Show information about the given ``touchpad`` in this widget.
``touchpad`` is a :class:`~synaptiks.touchpad.Touchpad` object.
"""
self.nameLabel.setText(i18nc(
'@info touchpad name', '<title><resources>%1</resource></title>',
touchpad.name))
pixmaps = {True: 'dialog-ok', False: 'dialog-cancel'}
for key in pixmaps:
pixmaps[key] = KIconLoader.global_().loadIcon(
pixmaps[key], KIconLoader.Small)
button_widgets = ('left', 'middle', 'right')
for widget_name, is_supported in zip(button_widgets, touchpad.buttons):
widget = getattr(self, '{0}Button'.format(widget_name))
widget.setPixmap(pixmaps[is_supported])
self.fingerDetection.setValue(touchpad.finger_detection)
# disable the emulation box, if the touchpad natively supports two
# fingers natively.
if touchpad.finger_detection > 2:
self.twoFingerEmulationBox.setEnabled(False)
# nonetheless always assign proper pixmaps
self.fingerWidthDetection.setPixmap(
pixmaps[touchpad.has_finger_width_detection])
self.pressureDetection.setPixmap(
pixmaps[touchpad.has_pressure_detection])
self.twoFingerEmulation.setPixmap(
pixmaps[touchpad.has_two_finger_emulation])
class MotionPage(QWidget, DynamicUserInterfaceMixin):
"""
Configuration page to configure the settings for cursor motion on the
touchpad.
"""
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self._load_userinterface()
class ScrollingPage(QWidget, DynamicUserInterfaceMixin):
"""
Configuration page to configure scrolling.
"""
def __init__(self, touchpad, parent=None):
QWidget.__init__(self, parent)
self._load_userinterface()
# HACK: the designer is seems unable to set this property, so we set it
# in code.
self.touchpad_coasting_speed.setSpecialValueText(
i18nc('@item coasting speed special value', 'Disabled'))
self.touchpad_coasting_speed.valueChanged.connect(
self._coasting_speed_changed)
self.coasting.toggled.connect(self._coasting_toggled)
step = self.touchpad_coasting_speed.singleStep()
value = self.touchpad_coasting_speed.value()
self._saved_coasting_speed = value or step
if touchpad.finger_detection >= 2 or touchpad.has_two_finger_emulation:
two_finger_widgets = self.findChildren(
QWidget, QRegExp('touchpad_(.*)_two_finger_scrolling'))
for widget in two_finger_widgets:
widget.setEnabled(True)
def _coasting_toggled(self, checked):
if checked and not self.touchpad_coasting_speed.value():
self.touchpad_coasting_speed.setValue(self._saved_coasting_speed)
elif not checked:
self.touchpad_coasting_speed.setValue(0)
def _coasting_speed_changed(self, value):
if value:
# remember any non-zero value to restore it, when the user
# re-checks "coasting"
self._saved_coasting_speed = value
self.coasting.setChecked(bool(value))
class TappingPage(QWidget, DynamicUserInterfaceMixin):
"""
Configuration page to configure tapping.
"""
def __init__(self, touchpad, parent=None):
QWidget.__init__(self, parent)
self._load_userinterface()
finger_tap_actions = self.findChildren(
KComboBox, QRegExp('touchpad_f[1-3]_tap_action'))
for widget in finger_tap_actions[touchpad.finger_detection:]:
self._set_enabled(widget, False)
if touchpad.has_two_finger_emulation:
self._set_enabled(self.touchpad_f2_tap_action, True)
def _set_enabled(self, widget, enabled):
widget.setEnabled(enabled)
self.fingerButtonsLayout.labelForField(widget).setEnabled(enabled)
class HardwarePage(QWidget, DynamicUserInterfaceMixin):
"""
Configuration page for hardware settings.
"""
def __init__(self, touchpad, parent=None):
QWidget.__init__(self, parent)
self._load_userinterface()
self.information.show_touchpad(touchpad)
class TouchpadConfigurationWidget(KTabWidget, ConfigurationWidgetMixin):
"""
A tab widget to configure the touchpad properties.
This basically aggregates all configuration pages in this module and adds
configuration management.
"""
configurationChanged = pyqtSignal(bool)
NAME_PREFIX = 'touchpad'
PROPERTY_MAP = dict(
QCheckBox='checked', QRadioButton='checked', QGroupBox='checked',
MouseButtonComboBox='currentIndex', KComboBox='currentIndex',
KIntNumInput='value', KDoubleNumInput='value'
)
CHANGED_SIGNAL_MAP = dict(
QCheckBox='toggled', QRadioButton='toggled', QGroupBox='toggled',
MouseButtonComboBox='currentIndexChanged',
KComboBox='currentIndexChanged',
KIntNumInput='valueChanged', KDoubleNumInput='valueChanged'
)
def __init__(self, config, parent=None):
"""
Create a new configuration widget for the given ``touchpad``.
``config`` is the :class:`~synaptiks.config.TouchpadConfiguration`
object displayed by this widget. ``parent`` is the parent
:class:`~PyQt4.QtGui.QWidget` (can be ``None``).
"""
KTabWidget.__init__(self, parent)
self.touchpad_config = config
pages = [HardwarePage(self.touchpad, self), MotionPage(self),
ScrollingPage(self.touchpad, self),
TappingPage(self.touchpad, self)]
for page in pages:
self.addTab(page, page.windowTitle())
self.setWindowTitle(
i18nc('@title:window', 'Touchpad configuration'))
self._setup(self.touchpad_config)
@property
def touchpad(self):
"""
The :class:`~synaptiks.touchpad.Touchpad` object associated with this
widget.
"""
return self.touchpad_config.touchpad
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_pool
short_description: Manages F5 BIG-IP LTM pools.
description:
- Manages F5 BIG-IP LTM pools via iControl REST API.
version_added: 1.2
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
notes:
- Requires BIG-IP software version >= 11.
- F5 developed module 'F5-SDK' required (https://github.com/F5Networks/f5-common-python).
- Best run as a local_action in your playbook.
requirements:
- f5-sdk
options:
description:
description:
- Specifies descriptive text that identifies the pool.
version_added: "2.3"
name:
description:
- Pool name
required: True
aliases:
- pool
lb_method:
description:
- Load balancing method. When creating a new pool, if this value is not
specified, the default of C(round-robin) will be used.
version_added: "1.3"
choices:
- dynamic-ratio-member
- dynamic-ratio-node
- fastest-app-response
- fastest-node
- least-connections-member
- least-connections-node
- least-sessions
- observed-member
- observed-node
- predictive-member
- predictive-node
- ratio-least-connections-member
- ratio-least-connections-node
- ratio-member
- ratio-node
- ratio-session
- round-robin
- weighted-least-connections-member
- weighted-least-connections-nod
monitor_type:
description:
- Monitor rule type when C(monitors) > 1.
version_added: "1.3"
choices: ['and_list', 'm_of_n']
quorum:
description:
- Monitor quorum value when C(monitor_type) is C(m_of_n).
version_added: "1.3"
monitors:
description:
- Monitor template name list. If the partition is not provided as part of
the monitor name, then the C(partition) option will be used instead.
version_added: "1.3"
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on
newly added or freshly detected up pool members.
version_added: "1.3"
reselect_tries:
description:
- Sets the number of times the system tries to contact a pool member
after a passive failure.
version_added: "2.2"
service_down_action:
description:
- Sets the action to take when node goes down in pool.
version_added: "1.3"
choices:
- none
- reset
- drop
- reselect
host:
description:
- Pool member IP.
aliases:
- address
port:
description:
- Pool member port.
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Create pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "least_connection_member"
slow_ramp_time: 120
delegate_to: localhost
- name: Modify load balancer method
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "round_robin"
delegate_to: localhost
- name: Add pool member
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
- name: Remove pool member from pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
- name: Delete pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
delegate_to: localhost
'''
RETURN = '''
monitor_type:
description: The contact that was set on the datacenter.
returned: changed
type: string
sample: "admin@root.local"
quorum:
description: The quorum that was set on the pool
returned: changed
type: int
sample: 2
monitors:
description: Monitors set on the pool.
returned: changed
type: list
sample: ['/Common/http', '/Common/gateway_icmp']
service_down_action:
description: Service down action that is set on the pool.
returned: changed
type: string
sample: "reset"
description:
description: Description set on the pool.
returned: changed
type: string
sample: "Pool of web servers"
lb_method:
description: The LB method set for the pool.
returned: changed
type: string
sample: "round-robin"
host:
description: IP of pool member included in pool.
returned: changed
type: string
sample: "10.10.10.10"
port:
description: Port of pool member included in pool.
returned: changed
type: int
sample: 80
slow_ramp_time:
description: The new value that is set for the slow ramp-up time.
returned: changed
type: int
sample: 500
reselect_tries:
description: The new value that is set for the number of tries to contact member
returned: changed
type: int
sample: 10
'''
import re
import os
from netaddr import IPAddress, AddrFormatError
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'loadBalancingMode': 'lb_method',
'slowRampTime': 'slow_ramp_time',
'reselectTries': 'reselect_tries',
'serviceDownAction': 'service_down_action'
}
updatables = [
'monitor_type', 'quorum', 'monitors', 'service_down_action',
'description', 'lb_method', 'slow_ramp_time', 'reselect_tries',
'host', 'port'
]
returnables = [
'monitor_type', 'quorum', 'monitors', 'service_down_action',
'description', 'lb_method', 'host', 'port', 'slow_ramp_time',
'reselect_tries', 'monitor', 'member_name', 'name', 'partition'
]
api_attributes = [
'description', 'name', 'loadBalancingMode', 'monitor', 'slowRampTime',
'reselectTries', 'serviceDownAction'
]
def __init__(self, params=None):
super(Parameters, self).__init__(params)
self._values['__warnings'] = []
@property
def lb_method(self):
lb_map = {
'ratio_node_address': 'ratio-node',
'dynamic_ratio': 'dynamic-ratio-node',
'least_connection_member': 'least-connections-member',
'least_connection_node_address': 'least-connections-node',
'fastest_node_address': 'fastest-node',
'observed_node_address': 'observed-node',
'predictive_node_address': 'predictive-node',
'weighted_least_connection_member': 'weighted-least-connections-member',
'weighted_least_connection_node_address': 'weighted-least-connections-node',
'ratio_least_connection_member': 'ratio-least-connections-member',
'ratio_least_connection_node_address': 'ratio-least-connections-node'
}
lb_method = self._values['lb_method']
if lb_method is None:
return None
spec = ArgumentSpec()
if lb_method in spec.lb_choice_removed:
raise F5ModuleError(
"The provided lb_method is not supported"
)
if lb_method in spec.lb_choice_deprecated:
self._values['__warnings'].append(
dict(
msg="The provided lb_method '{0}' is deprecated".format(lb_method),
version='2.4'
)
)
lb_method = lb_map.get(lb_method, lb_method.replace('_', '-'))
try:
assert lb_method in spec.lb_choice
except AssertionError:
raise F5ModuleError('Provided lb_method is unknown')
return lb_method
@property
def monitors(self):
monitors = list()
monitor_list = self._values['monitors']
monitor_type = self._values['monitor_type']
error1 = "The 'monitor_type' parameter cannot be empty when " \
"'monitors' parameter is specified."
error2 = "The 'monitor' parameter cannot be empty when " \
"'monitor_type' parameter is specified"
if monitor_list is not None and monitor_type is None:
raise F5ModuleError(error1)
elif monitor_list is None and monitor_type is not None:
raise F5ModuleError(error2)
elif monitor_list is None:
return None
for m in monitor_list:
if re.match(r'\/\w+\/\w+', m):
m = '/{0}/{1}'.format(self.partition, os.path.basename(m))
elif re.match(r'\w+', m):
m = '/{0}/{1}'.format(self.partition, m)
else:
raise F5ModuleError(
"Unknown monitor format '{0}'".format(m)
)
monitors.append(m)
return monitors
@property
def quorum(self):
value = self._values['quorum']
error = "Quorum value must be specified with monitor_type 'm_of_n'."
if self._values['monitor_type'] == 'm_of_n' and value is None:
raise F5ModuleError(error)
return value
@property
def monitor(self):
monitors = self.monitors
monitor_type = self._values['monitor_type']
quorum = self.quorum
if monitors is None:
return None
if monitor_type == 'and_list':
and_list = list()
for m in monitors:
if monitors.index(m) == 0:
and_list.append(m)
else:
and_list.append('and')
and_list.append(m)
result = ' '.join(and_list)
else:
min_list = list()
prefix = 'min {0} of {{'.format(str(quorum))
min_list.append(prefix)
for m in monitors:
min_list.append(m)
min_list.append('}')
result = ' '.join(min_list)
return result
@property
def host(self):
value = self._values['host']
if value is None:
return None
msg = "'%s' is not a valid IP address" % value
try:
IPAddress(value)
except AddrFormatError:
raise F5ModuleError(msg)
return value
@host.setter
def host(self, value):
self._values['host'] = value
@property
def port(self):
value = self._values['port']
if value is None:
return None
msg = "The provided port '%s' must be between 0 and 65535" % value
if value < 0 or value > 65535:
raise F5ModuleError(msg)
return value
@port.setter
def port(self, value):
self._values['port'] = value
@property
def member_name(self):
if self.host is None or self.port is None:
return None
mname = str(self.host) + ':' + str(self.port)
return mname
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute]
)
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
return result
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
if self.have:
warnings += self.have._values.get('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def _member_does_not_exist(self, members):
name = self.want.member_name
# Return False if name is None, so that we don't attempt to create it
if name is None:
return False
for member in members:
if member.name == name:
host, port = name.split(':')
self.have.host = host
self.have.port = int(port)
return False
return True
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have, members, poolres = self.read_current_from_device()
if not self.client.check_mode:
if self._member_does_not_exist(members):
self.create_member_on_device(poolres)
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the Pool")
return True
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
if self.want.member_name:
self.have, members, poolres = self.read_current_from_device()
if self._member_does_not_exist(members):
self.create_member_on_device(poolres)
return True
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.pools.pool.create(
partition=self.want.partition, **params
)
def create_member_on_device(self, poolres):
poolres.members_s.members.create(
name=self.want.member_name,
partition=self.want.partition
)
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.pools.pool.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def exists(self):
return self.client.api.tm.ltm.pools.pool.exists(
name=self.want.name,
partition=self.want.partition
)
def remove_from_device(self):
result = self.client.api.tm.ltm.pools.pool.load(
name=self.want.name,
partition=self.want.partition
)
if self.want.member_name and self.want.port and self.want.pool:
member = result.members_s.members.load(
name=self.want.member_name,
partition=self.want.partition
)
if member:
member.delete()
self.delete_node_on_device()
else:
result.delete()
def read_current_from_device(self):
tmp_res = self.client.api.tm.ltm.pools.pool.load(
name=self.want.name,
partition=self.want.partition
)
members = tmp_res.members_s.get_collection()
result = tmp_res.attrs
return Parameters(result), members, tmp_res
def delete_node_on_device(self):
resource = self.client.api.tm.ltm.nodes.node.load(
name=self.want.host,
partition=self.want.partition
)
try:
resource.delete()
except iControlUnexpectedHTTPError as e:
# If we cannot remove it, it is in use, it is up to user to delete
# it later.
if "is referenced by a member of pool" in str(e):
return
else:
raise
class ArgumentSpec(object):
def __init__(self):
self.lb_choice_deprecated = [
'round_robin',
'ratio_member',
'least_connection_member',
'observed_member',
'predictive_member',
'ratio_node_address',
'least_connection_node_address',
'fastest_node_address',
'observed_node_address',
'predictive_node_address',
'dynamic_ratio',
'fastest_app_response',
'least_sessions',
'dynamic_ratio_member',
'ratio_session',
'weighted_least_connection_member',
'ratio_least_connection_member',
'weighted_least_connection_node_address',
'ratio_least_connection_node_address'
]
self.lb_choice_removed = [
'l3_addr'
]
self.lb_choice = [
'dynamic-ratio-member',
'dynamic-ratio-node',
'fastest-app-response',
'fastest-node',
'least-connections-member',
'least-connections-node',
'least-sessions',
'observed-member',
'observed-node',
'predictive-member',
'predictive-node',
'ratio-least-connections-member',
'ratio-least-connections-node',
'ratio-member',
'ratio-node',
'ratio-session',
'round-robin',
'weighted-least-connections-member',
'weighted-least-connections-node'
]
lb_choices = self.lb_choice_removed + self.lb_choice + self.lb_choice_deprecated
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(
required=True,
aliases=['pool']
),
lb_method=dict(
choices=lb_choices
),
monitor_type=dict(
choices=[
'and_list', 'm_of_n'
]
),
quorum=dict(
type='int'
),
monitors=dict(
type='list'
),
slow_ramp_time=dict(
type='int'
),
reselect_tries=dict(
type='int'
),
service_down_action=dict(
choices=[
'none', 'reset',
'drop', 'reselect'
]
),
description=dict(),
host=dict(
aliases=['address'],
removed_in_version='2.4'
),
port=dict(
type='int',
removed_in_version='2.4'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
|
'''Mock remote control of a flash movie clip.'''
__author__ = 'Ethan Kennerly'
import code_unit
# mock some features of ActionScript
from actionscript import *
def export_stage_dictionary_jsfl_example():
r'''To test AppData.../Commands/export_stage_dictionary.jsfl:
Open AppData.../Commands/rename_and_duplicate_recursively.fla
Root timeline. Command -> example_stage.py
Exports element types: movie clip, text, and button.
>>> file_name = r'C:\Users\Ethan\AppData\Local\Adobe\Flash CS4\en\Configuration\Commands\rename_and_duplicate_recursively.fla.stage.py'
>>> stage = load(file_name)
>>> stage['currentLabel']
'start'
>>> stage['old_1_mc']['currentLabel']
''
>>> stage['old_1_mc']['old_square_library_item']['currentLabel']
'square'
>>> stage['old_1_mc']['old_square_library_item']['t_txt']['text']
'T'
>>> stage['old_0_mc']['old_square_library_item']['t_txt']['text']
'T'
>>> stage['old_1_mc']['old_square_library_item']['t_txt']['text']
'T'
>>> stage['old_1_mc']['command_btn']
{}
>>> stage['locked_mc']['currentLabel']
''
Beware! Can have duplicate names, which dictionary silently parses.
Check Flash trace for error messages.
>>> stage['old_2_mc'].get('command_btn')
Example of a stage:
{'old_1_mc': {'currentLabel': '', 'old_square_library_item': {'currentLabel': 'square', 't_txt': {'text': 'T'}}, 'command_btn': {}, 'old_circle_library_item': {'currentLabel': ''}, 'old_circle_mc': {'currentLabel': ''}, 'old_square_mc': {'currentLabel': 'square', 't_txt': {'text': 'T'}}}, 'old_0_mc': {'currentLabel': '', 'old_square_library_item': {'currentLabel': 'square', 't_txt': {'text': 'T'}}, 'command_btn': {}, 'old_circle_library_item': {'currentLabel': ''}, 'old_circle_mc': {'currentLabel': ''}, 'old_square_mc': {'currentLabel': 'square', 't_txt': {'text': 'T'}}}, 'old_2_mc': {'currentLabel': '', 'old_square_library_item': {'currentLabel': 'square', 't_txt': {'text': 'T'}}, 'command_btn': {}, 'old_circle_library_item': {'currentLabel': ''}, 'old_circle_mc': {'currentLabel': ''}, 'old_square_mc': {'currentLabel': 'square', 't_txt': {'text': 'T'}}}}
>>> from user_as import save_file_name
>>> stage = load(save_file_name)
>>> stage['currentLabel']
'setup'
>>> stage['lobby_mc']['currentLabel']
'_main'
>>> stage = create_stage(save_file_name)
>>> stage['lobby_mc']['currentLabel']
'_main'
>>> stage['lobby_mc']['_00_mc']['capture_3_3_1_mc']['currentLabel']
'none'
In .fla, Flash animator sets user default value on first frame.
The stage is the template for reloading and resetting users.
For example, in Flash CS4 the score by default is initialized to zero.
>>> stage['score_mc']['bar_mc']['marker_mc']['capture_mc']['currentLabel']
'_0'
>>> stage['score_mc']['bar_mc']['currentLabel']
'_0'
'''
def is_example():
'''What type of InteractiveObject is this?
>>> _txt = TextField()
>>> isTextField(_txt)
True
>>> _mc = MovieClip()
>>> isMovieClip(_mc)
True
>>> _btn = SimpleButton()
>>> isSimpleButton(_btn)
True
>>> isMovieClip(_txt)
False
>>> isTextField(_mc)
False
>>> isSimpleButton(_txt)
False
>>> isSimpleButton(_mc)
False
'''
def note(owner, property, value):
'''
>>> root = get_example_stage()
>>> expected_message = {'title_mc': {'password_txt': {'text': 'p'}}}
>>> note(root.title_mc.password_txt, 'text', 'p')
{'title_mc': {'password_txt': {'text': 'p'}}}
>>> news = note(root.title_mc.password_txt, 'text', 'p')
>>> if not news == expected_message:
... news
... expected_message
'''
news = {}
# XXX Gotcha ActionScript interprets {a: b} as {'a': b}
news[property] = value
eldest = owner
context = {}
while eldest.parent:
context = {}
context[eldest.name] = news
news = context
eldest = eldest.parent
return news
def get_note(owner, property):
'''
>>> root = get_example_stage()
>>> get_note(root.title_mc.password_txt, 'text')
{'title_mc': {'password_txt': {'text': 'pass'}}}
'''
value = getattr(owner, property) # .as: owner[property]
return note(owner, property, value)
def address(owner):
'''String address specifying owner in the tree.
>>> root = get_example_stage()
>>> address(root)
'root'
>>> address(root.title_mc)
'root.title_mc'
>>> address(root.title_mc.password_txt)
'root.title_mc.password_txt'
>>> address(root.title_mc.password_txt.text)
Traceback (most recent call last):
...
AttributeError: 'str' object has no attribute 'name'
'''
def _name(eldest):
if eldest.name.startswith('root'):
return 'root'
else:
return eldest.name
eldest = owner
address = '%s' % _name(eldest)
while eldest.parent:
eldest = eldest.parent
address = '%s.%s' % (_name(eldest), address)
return address
def text_or_number(value):
'''unicode is not str but equals a string, and initial string may be null.
>>> u'' == ''
True
>>> type(u'') == type('')
False
>>> text_or_number(u'')
True
>>> text_or_number('')
True
>>> text_or_number(None)
True
>>> text_or_number(175)
True
>>> text_or_number(175.5)
True
>>> text_or_number({1: 1})
False
>>> text_or_number(String(''))
True
'''
return type(value) == type('') \
or type(value) == type(u'') \
or type(value) == type(1) \
or type(value) == type(0.5) \
or type(value) == type(String('')) \
or value is None
def as_object_to_dict(as_object):
'''
>>> import pyamf
>>> pyamf.ASObject({'a': 1})
{'a': 1}
>>> a = pyamf.ASObject({'a': 1})
>>> as_object_to_dict(a)
{'a': 1}
>>> type(a)
<class 'pyamf.ASObject'>
>>> type(as_object_to_dict(a))
<type 'dict'>
'''
dict_string = dict.__repr__(as_object)
return eval(dict_string)
def upgrade(old, news):
'''Recursively replace old strings (or None) in dictionary
except if they are news dictionaries.
>>> old = {'a': {'b': '2', 'c': '3'}}
>>> news = {'a': {'b': '4'}}
>>> upgrade(old, news)
{'a': {'c': '3', 'b': '4'}}
>>> old = {'cat': {'hat': 'floppy', 'plate': {'ham': u'green', 'eggs': u'green'}}}
>>> news = {'cat': {'plate': {'ham': u'red'}}}
>>> upgrade(old, news)
{'cat': {'plate': {'eggs': u'green', 'ham': u'red'}, 'hat': 'floppy'}}
>>> news = {'cat': {'plate': {'ham': None}}}
>>> upgrade(old, news)
{'cat': {'plate': {'eggs': u'green', 'ham': None}, 'hat': 'floppy'}}
>>> upgrade({}, {'a': '1'})
{'a': '1'}
>>> upgrade({'a': {'b': u'2'}}, {'a': {}})
{'a': {'b': u'2'}}
>>> upgrade({'a': {'b': u'2'}}, {'a': {'b': u'2', 'c': u'3'}})
{'a': {'c': u'3', 'b': u'2'}}
Beware that pyamf uses ASObject, not dictionary
>>> import pyamf
>>> as_object = pyamf.ASObject({'a': {'b': u'2', 'c': u'3'}})
>>> upgrade({'a': {'b': u'2'}}, as_object)
{'a': {'c': u'3', 'b': u'2'}}
New string or null values on old subdictionaries are ignored.
>>> old_log_level = logging.getLogger().level
>>> logging.getLogger().setLevel(logging.CRITICAL)
>>> upgrade({'a': {'b': u'2'}}, {'a': None})
{'a': {'b': u'2'}}
>>> upgrade({'a': {'b': '2'}}, {'a': u'1'})
{'a': {'b': '2'}}
>>> upgrade({'a': {'b': {}, 'c': u'3'}}, {'a': {'c': u'1'}})
{'a': {'c': u'1', 'b': {}}}
>>> logging.getLogger().setLevel(old_log_level)
Replace x and y position.
>>> old_position = {'formation_field_mc': {'x': 1643, 'y': 975}}
>>> new_position = {'formation_field_mc': {'y': 175, 'x': 175}}
>>> upgraded = upgrade(old_position, new_position)
>>> if not new_position == upgraded: new_position, upgraded
>>> old_position = {'formation_field_mc': {'rotate_0_mc': {'response_mc': {'currentLabel': u'none'} }, 'x': 1643, 'y': 975}}
>>> new_position = {'formation_field_mc': {'y': 175, 'x': 175, 'rotate_0_mc': {'response_mc': {'currentLabel': 'response'}}}}
>>> upgraded = upgrade(old_position, new_position)
>>> if not new_position == upgraded: new_position, upgraded
'''
for key in news:
old_value = old.get(key) # .as: old[key]
value = news[key]
if old_value != value:
if not old.get(key):
old[key] = value
elif text_or_number(old.get(key)) and text_or_number(value):
old[key] = value
elif resembles_dictionary(old.get(key)) and resembles_dictionary(value):
old[key] = upgrade(old.get(key), value)
else:
logging.error('upgrade: i did not expect old_value: ' \
+ str(old_value)
+ ', value: ' + str(value))
## import pdb; pdb.set_trace();
return old
def insert_label(movie_clip, message):
'''label of clip.'''
message['currentLabel'] = movie_clip.currentLabel
return message
def insert_label_and_position(movie_clip, message):
'''integer x,y is smaller though less accurate than float.
Breaks animations on clip.'''
message['currentLabel'] = movie_clip.currentLabel
message['x'] = int(movie_clip.x)
message['y'] = int(movie_clip.y)
return message
def _family_tree(root, message, describe):
'''Recursively transcribe dictionary of text and movie clips.
Include the x and y positions of movieclips, but not of text.
>>> root = get_example_stage()
>>> title_family = {'start_btn': {}, 'username_txt': {'text': 'user'}, 'currentLabel': None, 'password_txt': {'text': 'pass'}}
>>> title_family_xy = {'start_btn': {}, 'username_txt': {'text': 'user'}, 'currentLabel': None, 'password_txt': {'text': 'pass'}, 'x': 0, 'y': 0}
>>> tree = family_tree(root['title_mc'])
>>> code_unit.dict_diff(title_family, tree)
Optionally include xy.
>>> tree = _family_tree(root['title_mc'], {}, insert_label_and_position)
>>> code_unit.dict_diff(title_family_xy, tree)
Strip out nodes that start with 'instance',
which in Flash is the default name for unnamed MovieClips and so on.
>>> instance = MovieClip()
>>> instance.name = 'instance999'
>>> root['title_mc'].addChild(instance)
>>> tree = family_tree(root['title_mc'])
>>> code_unit.dict_diff(title_family, tree)
The root should therefore have a name.
>>> tree = family_tree(root)
>>> 1 <= len(tree)
True
'''
if not hasName(root):
pass
elif isTextField(root):
message['text'] = root.text
elif isMovieClip(root):
message = describe(root, message)
for c in range(root.numChildren):
child = root.getChildAt(c)
if hasName(child):
child_message = {}
message[child.name] = child_message
_family_tree(child, child_message, describe)
return message
def family_tree(root):
return _family_tree(root, {}, insert_label)
def compose_root(describe, *named_txt_or_mc_array):
'''aggregate multiple objects (at root level only)
TODO: for children to stay children, cite ancestors
>>> compose_root(insert_label, MovieClip())
{}
'''
message = {}
for named_txt_or_mc in named_txt_or_mc_array:
object_dict = _family_tree(named_txt_or_mc, {}, describe)
if {} != object_dict:
object_name = named_txt_or_mc.name
message[object_name] = object_dict
return message
#def compose_root(*named_txt_or_mc_array):
# return _compose_root(insert_label, *named_txt_or_mc_array)
#def compose_root_position(*named_txt_or_mc_array):
# return _compose_root(insert_label_and_position, *named_txt_or_mc_array)
## // how can i send ... array to another function?
#def send_root(ambassador, *named_txt_or_mc_array):
# '''sends multiple objects (at root level only)
# TODO: for children to stay children, cite ancestors
# >>> from mock_client import echo_protocol_class
# >>> send_root(echo_protocol_class(), MovieClip())
# '''
# message = compose_root(*named_txt_or_mc_array)
# ambassador.send(message)
def is_simple_property(property):
'''Is this a position or scale?
>>> is_simple_property('x')
True
>>> is_simple_property('scaleY')
True
>>> is_simple_property('z')
False
'''
var = simple_properties = ['scaleX', 'scaleY', 'x', 'y'];
for p in range(len(simple_properties)):
var = simple = simple_properties[p];
if (simple == property):
return true;
return false;
def update_family_tree(display_object, news):
'''Recursively update from dictionary of text and movie clips.
ActionScript grumbles about reassigning root, so modify display_object in place.
>>> root = get_example_stage()
>>> root['gateway_mc']['currentLabel']
'none'
>>> news = {'gateway_mc': {'currentLabel': 'password'}}
>>> olds = update_family_tree(root, news)
>>> root['gateway_mc']['currentLabel']
'password'
Revert
>>> olds
{'gateway_mc': {'currentLabel': 'none'}}
>>> reverted = update_family_tree(root, olds)
>>> root['gateway_mc']['currentLabel']
'none'
>>> if not news == reverted:
... news
... reverted
Update text. For easy doctesting, convert legible unicode to string.
>>> root['title_mc']['username_txt'].text
'user'
>>> news = {'title_mc': {'username_txt': {'text': u'joris'}}}
>>> olds = update_family_tree(root, news)
>>> root['title_mc']['username_txt'].text
'joris'
Revert
>>> reverted = update_family_tree(root, olds)
>>> root['title_mc']['username_txt'].text
'user'
>>> if not news == reverted:
... news
... reverted
Update position.
>>> root['gateway_mc']['x'], root['gateway_mc']['y']
(0, 0)
>>> news = {'gateway_mc': {'x': 1, 'y': 2}}
>>> olds = update_family_tree(root, news)
>>> root['gateway_mc']['x'], root['gateway_mc']['y']
(1, 2)
Revert
>>> reverted = update_family_tree(root, olds)
>>> root['gateway_mc']['x'], root['gateway_mc']['y']
(0, 0)
>>> if not news == reverted:
... news
... reverted
If no such parameter, log error.
>>> old_log_level = logging.getLogger().level
>>> logging.getLogger().setLevel(logging.CRITICAL)
>>> no_such = {'gateway_mc': {'free_lunch': 'avocado'}}
>>> old = update_family_tree(root, no_such)
>>> logging.getLogger().setLevel(old_log_level)
Ignore dispatchEvent
Do not dispatch a mouse event. Only a few MouseEvents supported.
>>> event = {'title_mc': {'start_btn': {'dispatchEvent': 'mouseDown'}}}
>>> olds = update_family_tree(root, event)
Do not dispatch press to a movie clip.
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_DOWN, trace_event)
>>> press_title = {'title_mc': {'dispatchEvent': 'mouseDown'}}
>>> olds = update_family_tree(root, press_title)
If nothing changed, then return no olds.
>>> press_title = {'title_mc': {'dispatchEvent': 'mouseDown'}}
>>> label = root.currentLabel
>>> olds = update_family_tree(root, {'currentLabel': label})
>>> olds
{}
>>> label = root.title_mc.currentLabel
>>> olds = update_family_tree(root, {'title_mc': {'currentLabel': label}})
>>> olds
{}
Update scaleX and scaleY.
>>> scale = {'_0_0_mc': {'scaleX': 2.5, 'scaleY': 2.5}}
>>> olds = update_family_tree(root, scale)
>>> root._0_0_mc.scaleX
2.5
>>> root._0_0_mc.scaleY
2.5
# i do not need parent.
#Adopt an orphan and rebrand its label.
#>>> root['gateway_mc'].currentLabel
#'password'
#>>> adoption = {'gateway_mc': {'parent': '_1_0_mc', 'currentLabel': 'response'}}
#>>> old = update_family_tree(root, adoption)
#>>> root['_1_0_mc']['gateway_mc'].currentLabel
#'response'
#>>> root['gateway_mc']
#<type 'exceptions.ReferenceError'>
'''
var = olds = {};
for property in news:
var = value = news[property];
if (not display_object):
var = missing_news = 'update_family_tree: display_object is missing ' \
+ str(property) + ', news: ' + str(news);
logging.error(missing_news);
continue;
if (property == 'dispatchEvent'):
continue;
if (isMovieClip(display_object)):
if (property == 'currentLabel'):
if (value != display_object[property]):
olds[property] = display_object[property];
var = label = value;
label = unicode_to_string(label);
display_object.gotoAndPlay(label);
elif (is_simple_property(property)):
## XXX *>_<* Gotcha! when code controls a movie clip
# (to place at x,y position for example,
# this breaks the animation on that clip, such as gotoAndPlay
if (value != display_object[property]):
olds[property] = display_object[property];
display_object[property] = value;
else:
var = child = display_object.getChildByName(property);
if (child):
if (isMovieTextButton(display_object[property])):
var = changes = update_family_tree(
display_object[property], value);
if (changes):
olds[property] = changes;
else:
logging.error('update_family_tree: ' + str(property)
+ '? = ' + str(value));
elif (isTextField(display_object)):
if (property == 'text'):
if (value != display_object[property]):
olds[property] = display_object[property];
var = text = value;
text = unicode_to_string(text);
display_object[property] = text;
logging.debug('update_family_tree: olds=' + str(olds));
return olds;
def dispatch_family_tree(display_object, news):
'''
>>> root = get_example_stage()
Dispatch a mouse event. Only a few MouseEvents supported.
>>> event = {'title_mc': {'start_btn': {'dispatchEvent': 'mouseDown'}}}
>>> dispatch_family_tree(root, event)
trace_event: mouseDown
Cannot revert dispatching an event.
Therefore, all necessary results should be
embedded into x, y, or label of movie clip or text.
>>> none = {'title_mc': {'start_btn': {'dispatchEvent': 'none'}}}
>>> dispatch_family_tree(root, none)
Dispatch press to a movie clip.
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_DOWN, trace_event)
>>> press_title = {'title_mc': {'dispatchEvent': 'mouseDown'}}
>>> dispatch_family_tree(root, press_title)
trace_event: mouseDown
Update scaleX and scaleY quietly.
>>> scale = {'_0_0_mc': {'scaleX': 2.5, 'scaleY': 2.5}}
>>> olds = dispatch_family_tree(root, scale)
'''
# Easier to convert to ActionScript than: property, value in news.items()
for property in news:
var = value = news[property];
if (not display_object):
var = missing_news = 'update_family_tree: display_object is missing ' \
+ str(property) + ', news: ' + str(news);
logging.error(missing_news);
continue;
if (property == 'dispatchEvent'):
var = event_type = value;
# XXX Gotcha ActionScript requires 'new MouseEvent(...)'
# but does not bark while compiling. at runtime:
# TypeError: Error #1034: Type Coercion failed:
# cannot convert "mouseDown" to flash.events.MouseEvent.
var = event = new = MouseEvent(event_type);
# XXX Gotcha ActionScript ReferenceError: Error #1074:
# Illegal write to read-only property currentTarget
# on flash.events.MouseEvent.
#- event.currentTarget = display_object
display_object.dispatchEvent(event);
continue;
if (isMovieClip(display_object)):
if (property != 'currentLabel' \
and not is_simple_property(property)):
var = child = display_object.getChildByName(property);
if (child):
if (isMovieTextButton(display_object[property])):
dispatch_family_tree(
display_object[property], value);
else:
logging.error('dispatch_family_tree: ' + str(property)
+ '? = ' + str(value));
def imitate_news(root, news, log_news = None):
'''Read news and act.
if no news, do nothing.
ActionScript grumbles about reassigning root, so modify root in place.
>>> root = get_example_stage()
>>> root.gateway_mc.currentLabel
'none'
>>> no_news = {}
>>> olds = imitate_news(root, no_news)
>>> root.gateway_mc.currentLabel
'none'
>>> olds
{}
Other fields not in news are not removed.
>>> new_name = {'title_mc': {'username_txt': {'text': 'joris'}}}
>>> olds = imitate_news(root, new_name)
>>> root['title_mc']['username_txt']['text']
'joris'
>>> root['title_mc']['password_txt']['text']
'pass'
May revert.
>>> reverted = imitate_news(root, olds)
>>> root['title_mc']['username_txt']['text']
'user'
>>> root['title_mc']['password_txt']['text']
'pass'
Ignore invalid news.
>>> imitate_news(root, 'a')
imitate_news: i expect a dictionary
>>> def p(cite, news): print cite, news
>>> olds = imitate_news(root, new_name, log_news = p)
imitate_news {'title_mc': {'username_txt': {'text': 'joris'}}}
1) Update. 2) Dispatch.
>>> text_dispatch_news = {'title_mc': {'username_txt': {'text': 'jade'},
... 'start_btn': {'dispatchEvent': 'mouseDown'}}}
>>> root.title_mc.start_btn.addEventListener(MouseEvent.MOUSE_DOWN,
... trace_username)
>>> olds = imitate_news(root, text_dispatch_news)
jade
Ignore 'info' yet retain it.
>>> text_dispatch_news = {'title_mc': {'username_txt': {'text': 'jade'},
... 'start_btn': {'dispatchEvent': 'mouseDown'}}}
>>> info = {'info': {'_2_2_mc': []}}
>>> info_news = upgrade(text_dispatch_news, info)
>>> olds = imitate_news(root, info_news)
jade
>>> info_news['info']
{'_2_2_mc': []}
>>> info_news['info'] = {}
>>> olds = imitate_news(root, info_news)
jade
>>> info_news['info']
{}
'''
if (news is not None and resembles_dictionary(news)):
if (null != log_news):
log_news('imitate_news', news);
# logging.info('imitate_news: %s' % get_keywords(news))
# logging.debug('imitate_news: ' + str(news));
var = info = {'none': true};
if (undefined != news.get('info')):
info = news.get('info');
del news['info'];
var = olds = update_family_tree(root, news);
dispatch_family_tree(root, news);
if ({'none': true} != info):
news['info'] = info;
return olds;
elif (news is not None and not resembles_dictionary(news)):
# XXX log writes to stderr so doctest does not catch it.
trace('imitate_news: i expect a dictionary');
#trace('imitate_news: i expect a dictionary not ' \
# + code_unit.represent(news) );
if (null != log_news):
log_news('cannot imitate_news', news);
from text import sort_words
def get_keywords(news):
'''Summarize and sort news by top-level keys and labels.
>>> news = {'currentLabel': 'table', '_0_0_mc': {'currentLabel': 'black'}}
>>> get_keywords(news)
':table _0_0_mc:black'
>>> news = {'currentLabel': None, '_0_0_mc': {'currentLabel': 'black'}}
>>> get_keywords(news)
' _0_0_mc:black'
>>> news = {'currentLabel': 2, '_0_0_mc': {'currentLabel': 'black'}}
>>> get_keywords(news)
':2 _0_0_mc:black'
>>> news = {'currentLabel': '', '_0_0_mc': {'currentLabel': 'black'}}
>>> get_keywords(news)
': _0_0_mc:black'
Only list top level key.
>>> news = {'lobby_mc': {'_0_mc': {'enter_mc': {'currentLabel': 'enter'}}, 'enter_mc': {'currentLabel': 'enter'}}}
>>> get_keywords(news)
' lobby_mc'
Ignore other values.
>>> news = {'_3_3_mc': {'currentLabel': 'question_black'}, 'time': 445}
>>> get_keywords(news)
' _3_3_mc:question_black'
'''
var = log_str = '';
for name in news:
if ('currentLabel' == name and news[name] is not None):
log_str += ':' + str(news[name]);
elif (news[name]):
if (type(news[name]) == Object):
log_str += ' ' + name;
for item in news[name]:
if ('currentLabel' == item \
and news[name][item] != None):
log_str += ':' + str(news[name][item]);
return sort_words(log_str);
def trace_username(mouse_event):
name = mouse_event.currentTarget.parent.username_txt.text
trace(name)
# example mock stage.
# following functions do not appear in ActionScript client.
def get_small_tree():
return {
'currentLabel': 'login',
'title_mc': {
'username_txt': {'text': 'user'},
'password_txt': {'text': 'pass'},
'start_btn': {},
'currentLabel': None, },
'gateway_mc': {
'currentLabel': 'none', },
'x': 0,
'y': 0
}
def remember_family(news):
'''Create a mock stage from tree.
>>> remember_family(None)
Name unnamed root so that it will be parsed.
>>> remember_family({}).name
'root1'
>>> shrub = get_small_tree()
>>> remember_family(shrub).name
'root1'
Convert unicode to string.
>>> root = remember_family({'currentLabel': u'none'})
>>> root.currentLabel
'none'
Create children.
>>> root = remember_family(shrub)
>>> MovieClip == type(root.title_mc)
True
>>> root.title_mc.username_txt.text
'user'
Not moved yet, so may gotoAndPlay.
>>> root._moved
False
>>> root.currentLabel
'login'
>>> root.gotoAndPlay('a')
>>> root.currentLabel
'a'
'''
if not resembles_dictionary(news):
return
root = MovieClip()
root.name = 'root1'
root = remember_children(root, news)
return root
def create_stage(save_file_name):
'''Mock Flash stage.
Slow. Takes more than 0.1 to load client stage.
>>> import timeit
>>> setup_code = 'from user_as import save_file_name; from remote_control import create_stage'
>>> stmt_code = 'root = create_stage(save_file_name)'
>>> timer = timeit.Timer(stmt = stmt_code, setup = setup_code)
>>> setup_second = timer.timeit(1)
>>> if not 0.1 <= setup_second: setup_second
'''
tree = load(save_file_name)
if tree and tree['gateway_mc'] != 'save_not_found':
root = remember_family(tree)
else:
logging.warn('create_stage: stage file not found')
root = get_example_stage()
return root
# master
def promote_to_master(globe, root):
'''Remote slave performs operations reports back to master.
'''
if not hasName(root):
pass
elif isTextField(root):
print 'todo'
message['text'] = root.text
elif isMovieClip(root):
message = insert_label_and_position(root, message)
for c in range(root.numChildren):
child = root.getChildAt(c)
if hasName(child):
child_message = {}
message[child.name] = child_message
promote_to_master(child, child_message)
return message
# server only
class stage_borg:
'''Multiple instances share same state.
http://code.activestate.com/recipes/66531/'''
__shared_state = {}
from user_as import save_file_name
tree = load(save_file_name)
def __init__(self):
self.__dict__ = self.__shared_state
def refer_to_stage(save_file_name):
'''
setup of about 20 users takes about 10 seconds.
most of get_start_problem_example is spent in setup_users.
profile/get_start_problem_example.profile.png
most of this time is creating the movie clips from the stage.
profile/do_setup_client.profile.png
remember_children
in most tests most of these users are not used and most of the clips of a user are not used.
lazily load movie clip.
TODO: load stage file into dictionary inside a stage borg.
TODO: each user refers to stage borg.
getChildByName
if child not found, trace lineage.
look for lineage in dictionary of stage borg.
if found, then remember that child, but none of its descendents.
>>> from user_as import save_file_name
>>> moonhyoung_root = refer_to_stage(save_file_name)
>>> moonhyoung_root.name
'root1'
>>> moonhyoung_root.currentLabel
'setup'
>>> moonhyoung_root._1_2_mc.currentLabel
'empty_black'
>>> moonhyoung_root._1_2_mc.territory_mc.currentLabel
'neutral'
Quick. Less than 0.1 seconds to load client stage.
>>> import timeit
>>> setup_code = 'from user_as import save_file_name; from remote_control import refer_to_stage'
>>> stmt_code = 'root = refer_to_stage(save_file_name)'
>>> timer = timeit.Timer(stmt = stmt_code, setup = setup_code)
>>> setup_second = timer.timeit(10)
>>> if not setup_second <= 0.99: setup_second
'''
stage = stage_borg()
tree = stage.tree
if tree and tree['gateway_mc'] != 'save_not_found':
root = MovieClip()
root.name = 'root1'
root = remember_children(root, tree, recurse = False, update = True)
else:
logging.error('create_stage: stage file not found')
return root
def unique_family_tree(root, message):
'''Recursively transcribe dictionary of text and movie clips
that do not have an orphanage (and so are different from template).
>>> from user_as import save_file_name
>>> root = refer_to_stage(save_file_name)
>>> title_family = {'start_btn': {}, 'username_txt': {'text': 'user'}, 'currentLabel': None, 'password_txt': {'text': 'pass'}}
>>> tree = unique_family_tree(root['title_mc'], {})
>>> tree
{'currentLabel': 'none'}
Strip out nodes that start with 'instance',
which in Flash is the default name for unnamed MovieClips and so on.
>>> instance = MovieClip()
>>> instance.name = 'instance999'
>>> root['title_mc'].addChild(instance)
>>> tree = unique_family_tree(root, {})
>>> tree.get('title_mc')
{'currentLabel': 'none'}
>>> tree = unique_family_tree(root['title_mc'], {})
>>> tree
{'currentLabel': 'none'}
include current label.
>>> root.title_mc.gotoAndPlay('new')
>>> root.title_mc.currentLabel
'new'
>>> tree = unique_family_tree(root['title_mc'], {})
>>> root.title_mc.currentLabel
'new'
>>> tree
{'currentLabel': 'new'}
>>> root.title_mc.currentLabel
'new'
The root should therefore have a name.
Text fields do not refer to orphanage, so are always included.
>>> root.title_mc.username_txt.text = 'moonhyoung'
>>> root.title_mc.currentLabel
'new'
>>> tree = unique_family_tree(root['title_mc'], {})
>>> from pprint import pprint
>>> tree['currentLabel']
'new'
>>> tree['username_txt']
{'text': 'moonhyoung'}
'''
if not hasName(root):
pass
elif isTextField(root):
message['text'] = root.text
elif isMovieClip(root):
#message = describe(root, message)
message['currentLabel'] = root.currentLabel
if not root._orphanage:
child_count = root.numChildren
for c in range(child_count):
child = root.getChildAt(c)
if hasName(child):
child_message = unique_family_tree(child, {})
if child_message:
message[child.name] = child_message
return message
def _log_text(news, context, texts, logs):
for key, value in news.items():
if 'text' == key:
in_context = False
for text in texts:
if text in context:
in_context = True
if in_context:
entry = '>>> %s.text = "%s"' % (context, value)
logs.append(entry)
elif resembles_dictionary(news.get(key)) and resembles_dictionary(value):
subcontext = '%s.%s' % (context, key)
_log_text(news.get(key), subcontext, texts, logs)
def log_dispatchEvent(news, context, texts):
r'''dispatchEvent and some text fields.
Lukasz toggles score. Server logs.
>>> news = {'option_mc': {'score_mc': {'enter_mc': {'dispatchEvent': 'mouseDown'}}}}
>>> log_dispatchEvent(news, 'lukasz.root', [])
['>>> lukasz.root.option_mc.score_mc.enter_mc.dispatchEvent(mouseDown)']
If multiple events, separate by a newline.
>>> news = {'option_mc': {'score_mc': {'enter_mc': {'dispatchEvent': 'mouseDown'}}, 'prohibit_danger_mc': {'enter_mc': {'dispatchEvent': 'mouseDown'}}}}
>>> log_dispatchEvent(news, 'lukasz.root', [])
['>>> lukasz.root.option_mc.score_mc.enter_mc.dispatchEvent(mouseDown)', '>>> lukasz.root.option_mc.prohibit_danger_mc.enter_mc.dispatchEvent(mouseDown)']
If watching chat text, log chat text.
>>> news = {'chat_input_mc': {'currentLabel': 'none', 'dispatchEvent': 'mouseDown'}, 'chat_input_txt': {'text': 'hello'}}
>>> log_dispatchEvent(news, 'lukasz.root', [])
['>>> lukasz.root.chat_input_mc.dispatchEvent(mouseDown)']
>>> log_dispatchEvent(news, 'lukasz.root', ['chat_input_txt'])
['>>> lukasz.root.chat_input_txt.text = "hello"', '>>> lukasz.root.chat_input_mc.dispatchEvent(mouseDown)']
'''
logs = []
_log_text(news, context, texts, logs)
_log_dispatchEvent(news, context, logs)
return logs
def _log_dispatchEvent(news, context, logs):
for key, value in news.items():
if 'dispatchEvent' == key:
entry = '>>> %s.dispatchEvent(%s)' % (context, value)
logs.append(entry)
elif resembles_dictionary(news.get(key)) \
and resembles_dictionary(value):
subcontext = '%s.%s' % (context, key)
_log_dispatchEvent(news.get(key), subcontext, logs)
def change(old, news):
'''Recursively report changes of old strings (or None) in dictionary
except if they are news dictionaries. Ignore extra old or new members.
>>> old = {'purse': {'silver': '2', 'gold': '3'}}
>>> news = {'purse': {'gold': '4'}}
>>> change(old, news)
{'purse': {'gold': '4'}}
>>> old = {'cat': {'hat': 'floppy', 'plate': {'ham': u'green', 'eggs': u'green'}}}
>>> news = {'cat': {'plate': {'ham': u'red'}}}
>>> change(old, news)
{'cat': {'plate': {'ham': u'red'}}}
>>> news = {'cat': {'plate': {'ham': None}}}
>>> change(old, news)
{'cat': {'plate': {'ham': None}}}
Ignore extra new members.
>>> change({}, {'a': '1'})
{}
>>> change({'a': {'b': u'2'}}, {'a': {'b': u'2', 'c': u'3'}})
{}
>>> change({'a': {'b': u'2'}}, {'a': {}})
{}
Beware that pyamf uses ASObject, not dictionary
>>> import pyamf
>>> as_object = pyamf.ASObject({'a': {'b': u'2', 'c': u'3'}})
>>> change({'a': {'b': u'2'}}, as_object)
{}
New string or null values on old subdictionaries are ignored.
>>> old_log_level = logging.getLogger().level
>>> logging.getLogger().setLevel(logging.CRITICAL)
>>> change({'a': {'b': u'2'}}, {'a': None})
{}
>>> change({'a': {'b': '2'}}, {'a': u'1'})
{}
>>> change({'a': {'b': {}, 'c': u'3'}}, {'a': {'c': u'1'}})
{'a': {'c': u'1'}}
>>> logging.getLogger().setLevel(old_log_level)
Replace x and y position.
>>> old_position = {'formation_field_mc': {'x': 1643, 'y': 975}}
>>> new_position = {'formation_field_mc': {'y': 175, 'x': 175}}
>>> changed = change(old_position, new_position)
>>> if not new_position == changed: new_position, changed
>>> old_position = {'formation_field_mc': {'rotate_0_mc': {'response_mc': {'currentLabel': u'none'} }, 'x': 1643, 'y': 975}}
>>> new_position = {'formation_field_mc': {'y': 175, 'x': 175, 'rotate_0_mc': {'response_mc': {'currentLabel': 'response'}}}}
>>> changed = change(old_position, new_position)
>>> if not new_position == changed: new_position, changed
'''
changed = {}
for key in old:
old_value = old.get(key) # .as: old[key]
value = news.get(key)
if news.has_key(key) and old_value != value:
if not old.get(key):
changed[key] = value
elif text_or_number(old.get(key)) and text_or_number(value):
changed[key] = value
elif resembles_dictionary(old.get(key)) \
and resembles_dictionary(value):
child_changed = change(old.get(key), value)
if child_changed:
changed[key] = child_changed
else:
logging.error('change: i did not expect old_value: ' \
+ str(old_value)
+ ', value: ' + str(value))
## import pdb; pdb.set_trace();
return changed
def get_branch(root, keys):
r'''Get children of dictionary by names.
>>> from pprint import pprint
>>> pprint( get_branch({'a': {'d': 4}, 'b': 2, 'c': 3}, ['a', 'b']) )
{'a': {'d': 4}, 'b': 2}
>>> pprint( get_branch({'b': 2}, ['a', 'b']) )
{'b': 2}
>>> pprint( get_branch({'c': 3}, ['a', 'b']) )
{}
'''
branches = {}
for key in keys:
if key in root:
branches[key] = root[key]
return branches
def get_lineage(owner, property):
'''List of names.
>>> root = get_example_stage()
>>> get_lineage(root.title_mc.username_txt, 'text')
['title_mc', 'username_txt', 'text']
'''
lineage = [property]
eldest = owner
def _child_name(eldest):
if eldest.name.startswith('root'):
return
else:
return eldest.name
while eldest.parent:
name = _child_name(eldest)
if name:
lineage.insert(0, name)
eldest = eldest.parent
return lineage
def get_latest(news, owner, property):
'''Get label from news at address.
If not in news, get from owner's property.
>>> root = get_example_stage()
>>> get_latest({}, root.title_mc.username_txt, 'text')
'user'
>>> get_latest({'title_mc': {'username_txt': {'text': 'other'}}}, root.title_mc.username_txt, 'text')
'other'
'''
lineage = get_lineage(owner, property)
parent = news
for child in lineage:
if parent.has_key(child):
parent = parent[child]
if parent:
return parent
return getattr(owner, property) # .as: owner[property]
def get_grandchild_by_name(grandparent, grandchild_name):
'''First grandchild by that name.
>>> root = get_example_stage()
>>> _mc = get_grandchild_by_name(root, 'username_txt')
>>> _mc.name
'username_txt'
>>> _mc.parent.name
'title_mc'
>>> get_grandchild_by_name(root, 'usernaam_txt')
>>> get_grandchild_by_name(root.title_mc, 'usernaam_txt')
'''
for p in range(grandparent.numChildren):
parent = grandparent.getChildAt(p)
if hasattr(parent, 'getChildByName'):
grandchild = parent.getChildByName(grandchild_name)
if grandchild:
return grandchild
#for gc in range(parent.numChildren):
# grandchild = grandparent.getChildAt(gc)
# print grandchild.name
# if grandchild.name == grandchild_name:
# return grandchild
if __name__ == '__main__':
import code_unit
import sys
code_unit.test_file_args('./remote_control.py', sys.argv,
locals(), globals())
#+ print
#+ print
#+ print
#+ print 'remote_control.py starts testing ...',
#+ import doctest
#+ doctest.testmod()
#+ print '... and finishes testing.'
#+ print
|
|
"""
Support for the LIFX platform that implements lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lifx/
"""
import logging
import asyncio
import sys
import math
from os import path
from functools import partial
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.light import (
Light, DOMAIN, PLATFORM_SCHEMA, LIGHT_TURN_ON_SCHEMA,
ATTR_BRIGHTNESS, ATTR_BRIGHTNESS_PCT, ATTR_COLOR_NAME, ATTR_RGB_COLOR,
ATTR_XY_COLOR, ATTR_COLOR_TEMP, ATTR_KELVIN, ATTR_TRANSITION, ATTR_EFFECT,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR,
SUPPORT_XY_COLOR, SUPPORT_TRANSITION, SUPPORT_EFFECT,
VALID_BRIGHTNESS, VALID_BRIGHTNESS_PCT,
preprocess_turn_on_alternatives)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_STOP
from homeassistant import util
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.service import extract_entity_ids
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['aiolifx==0.5.0', 'aiolifx_effects==0.1.0']
UDP_BROADCAST_PORT = 56700
CONF_SERVER = 'server'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SERVER, default='0.0.0.0'): cv.string,
})
SERVICE_LIFX_SET_STATE = 'lifx_set_state'
ATTR_INFRARED = 'infrared'
ATTR_POWER = 'power'
LIFX_SET_STATE_SCHEMA = LIGHT_TURN_ON_SCHEMA.extend({
ATTR_INFRARED: vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255)),
ATTR_POWER: cv.boolean,
})
SERVICE_EFFECT_PULSE = 'lifx_effect_pulse'
SERVICE_EFFECT_COLORLOOP = 'lifx_effect_colorloop'
SERVICE_EFFECT_STOP = 'lifx_effect_stop'
ATTR_POWER_ON = 'power_on'
ATTR_MODE = 'mode'
ATTR_PERIOD = 'period'
ATTR_CYCLES = 'cycles'
ATTR_SPREAD = 'spread'
ATTR_CHANGE = 'change'
PULSE_MODE_BLINK = 'blink'
PULSE_MODE_BREATHE = 'breathe'
PULSE_MODE_PING = 'ping'
PULSE_MODE_STROBE = 'strobe'
PULSE_MODE_SOLID = 'solid'
PULSE_MODES = [PULSE_MODE_BLINK, PULSE_MODE_BREATHE, PULSE_MODE_PING,
PULSE_MODE_STROBE, PULSE_MODE_SOLID]
LIFX_EFFECT_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_POWER_ON, default=True): cv.boolean,
})
LIFX_EFFECT_PULSE_SCHEMA = LIFX_EFFECT_SCHEMA.extend({
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
ATTR_COLOR_NAME: cv.string,
ATTR_RGB_COLOR: vol.All(vol.ExactSequence((cv.byte, cv.byte, cv.byte)),
vol.Coerce(tuple)),
ATTR_COLOR_TEMP: vol.All(vol.Coerce(int), vol.Range(min=1)),
ATTR_KELVIN: vol.All(vol.Coerce(int), vol.Range(min=0)),
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Range(min=0.05)),
ATTR_CYCLES: vol.All(vol.Coerce(float), vol.Range(min=1)),
ATTR_MODE: vol.In(PULSE_MODES),
})
LIFX_EFFECT_COLORLOOP_SCHEMA = LIFX_EFFECT_SCHEMA.extend({
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Clamp(min=0.05)),
ATTR_CHANGE: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_SPREAD: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_TRANSITION: vol.All(vol.Coerce(float), vol.Range(min=0)),
})
LIFX_EFFECT_STOP_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the LIFX platform."""
import aiolifx
if sys.platform == 'win32':
_LOGGER.warning("The lifx platform is known to not work on Windows. "
"Consider using the lifx_legacy platform instead")
server_addr = config.get(CONF_SERVER)
lifx_manager = LIFXManager(hass, async_add_devices)
lifx_discovery = aiolifx.LifxDiscovery(hass.loop, lifx_manager)
coro = hass.loop.create_datagram_endpoint(
lambda: lifx_discovery, local_addr=(server_addr, UDP_BROADCAST_PORT))
hass.async_add_job(coro)
@callback
def cleanup(event):
"""Clean up resources."""
lifx_discovery.cleanup()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
return True
def find_hsbk(**kwargs):
"""Find the desired color from a number of possible inputs."""
hue, saturation, brightness, kelvin = [None]*4
preprocess_turn_on_alternatives(kwargs)
if ATTR_RGB_COLOR in kwargs:
hue, saturation, brightness = \
color_util.color_RGB_to_hsv(*kwargs[ATTR_RGB_COLOR])
saturation = convert_8_to_16(saturation)
brightness = convert_8_to_16(brightness)
kelvin = 3500
if ATTR_XY_COLOR in kwargs:
hue, saturation = color_util.color_xy_to_hs(*kwargs[ATTR_XY_COLOR])
saturation = convert_8_to_16(saturation)
kelvin = 3500
if ATTR_COLOR_TEMP in kwargs:
kelvin = int(color_util.color_temperature_mired_to_kelvin(
kwargs[ATTR_COLOR_TEMP]))
saturation = 0
if ATTR_BRIGHTNESS in kwargs:
brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
hsbk = [hue, saturation, brightness, kelvin]
return None if hsbk == [None]*4 else hsbk
def merge_hsbk(base, change):
"""Copy change on top of base, except when None."""
if change is None:
return None
return list(map(lambda x, y: y if y is not None else x, base, change))
class LIFXManager(object):
"""Representation of all known LIFX entities."""
def __init__(self, hass, async_add_devices):
"""Initialize the light."""
import aiolifx_effects
self.entities = {}
self.hass = hass
self.async_add_devices = async_add_devices
self.effects_conductor = aiolifx_effects.Conductor(loop=hass.loop)
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
self.register_set_state(descriptions)
self.register_effects(descriptions)
def register_set_state(self, descriptions):
"""Register the LIFX set_state service call."""
@asyncio.coroutine
def async_service_handle(service):
"""Apply a service."""
tasks = []
for light in self.service_to_entities(service):
if service.service == SERVICE_LIFX_SET_STATE:
task = light.async_set_state(**service.data)
tasks.append(self.hass.async_add_job(task))
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
self.hass.services.async_register(
DOMAIN, SERVICE_LIFX_SET_STATE, async_service_handle,
descriptions.get(SERVICE_LIFX_SET_STATE),
schema=LIFX_SET_STATE_SCHEMA)
def register_effects(self, descriptions):
"""Register the LIFX effects as hass service calls."""
@asyncio.coroutine
def async_service_handle(service):
"""Apply a service, i.e. start an effect."""
entities = self.service_to_entities(service)
if entities:
yield from self.start_effect(
entities, service.service, **service.data)
self.hass.services.async_register(
DOMAIN, SERVICE_EFFECT_PULSE, async_service_handle,
descriptions.get(SERVICE_EFFECT_PULSE),
schema=LIFX_EFFECT_PULSE_SCHEMA)
self.hass.services.async_register(
DOMAIN, SERVICE_EFFECT_COLORLOOP, async_service_handle,
descriptions.get(SERVICE_EFFECT_COLORLOOP),
schema=LIFX_EFFECT_COLORLOOP_SCHEMA)
self.hass.services.async_register(
DOMAIN, SERVICE_EFFECT_STOP, async_service_handle,
descriptions.get(SERVICE_EFFECT_STOP),
schema=LIFX_EFFECT_STOP_SCHEMA)
@asyncio.coroutine
def start_effect(self, entities, service, **kwargs):
"""Start a light effect on entities."""
import aiolifx_effects
devices = list(map(lambda l: l.device, entities))
if service == SERVICE_EFFECT_PULSE:
effect = aiolifx_effects.EffectPulse(
power_on=kwargs.get(ATTR_POWER_ON, None),
period=kwargs.get(ATTR_PERIOD, None),
cycles=kwargs.get(ATTR_CYCLES, None),
mode=kwargs.get(ATTR_MODE, None),
hsbk=find_hsbk(**kwargs),
)
yield from self.effects_conductor.start(effect, devices)
elif service == SERVICE_EFFECT_COLORLOOP:
preprocess_turn_on_alternatives(kwargs)
brightness = None
if ATTR_BRIGHTNESS in kwargs:
brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
effect = aiolifx_effects.EffectColorloop(
power_on=kwargs.get(ATTR_POWER_ON, None),
period=kwargs.get(ATTR_PERIOD, None),
change=kwargs.get(ATTR_CHANGE, None),
spread=kwargs.get(ATTR_SPREAD, None),
transition=kwargs.get(ATTR_TRANSITION, None),
brightness=brightness,
)
yield from self.effects_conductor.start(effect, devices)
elif service == SERVICE_EFFECT_STOP:
yield from self.effects_conductor.stop(devices)
def service_to_entities(self, service):
"""Return the known devices that a service call mentions."""
entity_ids = extract_entity_ids(self.hass, service)
if entity_ids:
entities = [entity for entity in self.entities.values()
if entity.entity_id in entity_ids]
else:
entities = list(self.entities.values())
return entities
@callback
def register(self, device):
"""Handle for newly detected bulb."""
if device.mac_addr in self.entities:
entity = self.entities[device.mac_addr]
entity.device = device
entity.registered = True
_LOGGER.debug("%s register AGAIN", entity.who)
self.hass.async_add_job(entity.async_update_ha_state())
else:
_LOGGER.debug("%s register NEW", device.ip_addr)
device.get_version(self.got_version)
@callback
def got_version(self, device, msg):
"""Request current color setting once we have the product version."""
device.get_color(self.ready)
@callback
def ready(self, device, msg):
"""Handle the device once all data is retrieved."""
entity = LIFXLight(device, self.effects_conductor)
_LOGGER.debug("%s register READY", entity.who)
self.entities[device.mac_addr] = entity
self.async_add_devices([entity])
@callback
def unregister(self, device):
"""Handle disappearing bulbs."""
if device.mac_addr in self.entities:
entity = self.entities[device.mac_addr]
_LOGGER.debug("%s unregister", entity.who)
entity.registered = False
self.hass.async_add_job(entity.async_update_ha_state())
class AwaitAioLIFX:
"""Wait for an aiolifx callback and return the message."""
def __init__(self, light):
"""Initialize the wrapper."""
self.light = light
self.device = None
self.message = None
self.event = asyncio.Event()
@callback
def callback(self, device, message):
"""Handle responses."""
self.device = device
self.message = message
self.event.set()
@asyncio.coroutine
def wait(self, method):
"""Call an aiolifx method and wait for its response."""
self.device = None
self.message = None
self.event.clear()
method(self.callback)
yield from self.event.wait()
return self.message
def convert_8_to_16(value):
"""Scale an 8 bit level into 16 bits."""
return (value << 8) | value
def convert_16_to_8(value):
"""Scale a 16 bit level into 8 bits."""
return value >> 8
class LIFXLight(Light):
"""Representation of a LIFX light."""
def __init__(self, device, effects_conductor):
"""Initialize the light."""
self.device = device
self.effects_conductor = effects_conductor
self.registered = True
self.product = device.product
self.postponed_update = None
@property
def lifxwhite(self):
"""Return whether this is a white-only bulb."""
# https://lan.developer.lifx.com/docs/lifx-products
return self.product in [10, 11, 18]
@property
def available(self):
"""Return the availability of the device."""
return self.registered
@property
def name(self):
"""Return the name of the device."""
return self.device.label
@property
def who(self):
"""Return a string identifying the device."""
ip_addr = '-'
if self.device:
ip_addr = self.device.ip_addr[0]
return "%s (%s)" % (ip_addr, self.name)
@property
def rgb_color(self):
"""Return the RGB value."""
hue, sat, bri, _ = self.device.color
return color_util.color_hsv_to_RGB(
hue, convert_16_to_8(sat), convert_16_to_8(bri))
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
brightness = convert_16_to_8(self.device.color[2])
_LOGGER.debug("brightness: %d", brightness)
return brightness
@property
def color_temp(self):
"""Return the color temperature."""
kelvin = self.device.color[3]
temperature = color_util.color_temperature_kelvin_to_mired(kelvin)
_LOGGER.debug("color_temp: %d", temperature)
return temperature
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# The 3 LIFX "White" products supported a limited temperature range
if self.lifxwhite:
kelvin = 6500
else:
kelvin = 9000
return math.floor(color_util.color_temperature_kelvin_to_mired(kelvin))
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# The 3 LIFX "White" products supported a limited temperature range
if self.lifxwhite:
kelvin = 2700
else:
kelvin = 2500
return math.ceil(color_util.color_temperature_kelvin_to_mired(kelvin))
@property
def is_on(self):
"""Return true if device is on."""
return self.device.power_level != 0
@property
def effect(self):
"""Return the name of the currently running effect."""
effect = self.effects_conductor.effect(self.device)
if effect:
return 'lifx_effect_' + effect.name
return None
@property
def supported_features(self):
"""Flag supported features."""
features = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP |
SUPPORT_TRANSITION | SUPPORT_EFFECT)
if not self.lifxwhite:
features |= SUPPORT_RGB_COLOR | SUPPORT_XY_COLOR
return features
@property
def effect_list(self):
"""Return the list of supported effects for this light."""
if self.lifxwhite:
return [
SERVICE_EFFECT_PULSE,
SERVICE_EFFECT_STOP,
]
return [
SERVICE_EFFECT_COLORLOOP,
SERVICE_EFFECT_PULSE,
SERVICE_EFFECT_STOP,
]
@asyncio.coroutine
def update_after_transition(self, now):
"""Request new status after completion of the last transition."""
self.postponed_update = None
yield from self.async_update()
yield from self.async_update_ha_state()
def update_later(self, when):
"""Schedule an update requests when a transition is over."""
if self.postponed_update:
self.postponed_update()
self.postponed_update = None
if when > 0:
self.postponed_update = async_track_point_in_utc_time(
self.hass, self.update_after_transition,
util.dt.utcnow() + timedelta(milliseconds=when))
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Turn the device on."""
kwargs[ATTR_POWER] = True
yield from self.async_set_state(**kwargs)
@asyncio.coroutine
def async_turn_off(self, **kwargs):
"""Turn the device off."""
kwargs[ATTR_POWER] = False
yield from self.async_set_state(**kwargs)
@asyncio.coroutine
def async_set_state(self, **kwargs):
"""Set a color on the light and turn it on/off."""
yield from self.effects_conductor.stop([self.device])
if ATTR_EFFECT in kwargs:
yield from self.default_effect(**kwargs)
return
if ATTR_INFRARED in kwargs:
self.device.set_infrared(convert_8_to_16(kwargs[ATTR_INFRARED]))
if ATTR_TRANSITION in kwargs:
fade = int(kwargs[ATTR_TRANSITION] * 1000)
else:
fade = 0
# These are both False if ATTR_POWER is not set
power_on = kwargs.get(ATTR_POWER, False)
power_off = not kwargs.get(ATTR_POWER, True)
hsbk = merge_hsbk(self.device.color, find_hsbk(**kwargs))
# Send messages, waiting for ACK each time
ack = AwaitAioLIFX(self).wait
bulb = self.device
if not self.is_on:
if power_off:
yield from ack(partial(bulb.set_power, False))
if hsbk:
yield from ack(partial(bulb.set_color, hsbk))
if power_on:
yield from ack(partial(bulb.set_power, True, duration=fade))
else:
if power_on:
yield from ack(partial(bulb.set_power, True))
if hsbk:
yield from ack(partial(bulb.set_color, hsbk, duration=fade))
if power_off:
yield from ack(partial(bulb.set_power, False, duration=fade))
# Avoid state ping-pong by holding off updates while the state settles
yield from asyncio.sleep(0.25)
# Schedule an update when the transition is complete
self.update_later(fade)
@asyncio.coroutine
def default_effect(self, **kwargs):
"""Start an effect with default parameters."""
service = kwargs[ATTR_EFFECT]
data = {
ATTR_ENTITY_ID: self.entity_id,
}
yield from self.hass.services.async_call(DOMAIN, service, data)
@asyncio.coroutine
def async_update(self):
"""Update bulb status."""
_LOGGER.debug("%s async_update", self.who)
if self.available:
yield from AwaitAioLIFX(self).wait(self.device.get_color)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from jinja2 import TemplateNotFound
from frappe.utils import cint, strip_html
from frappe.utils.pdf import get_pdf
no_cache = 1
no_sitemap = 1
base_template_path = "templates/www/print.html"
standard_format = "templates/print_formats/standard.html"
def get_context(context):
"""Build context for print"""
if not ((frappe.form_dict.doctype and frappe.form_dict.name) or frappe.form_dict.doc):
return {
"body": """<h1>Error</h1>
<p>Parameters doctype and name required</p>
<pre>%s</pre>""" % repr(frappe.form_dict)
}
if frappe.form_dict.doc:
doc = frappe.form_dict.doc
else:
doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
meta = frappe.get_meta(doc.doctype)
print_format = get_print_format_doc(None, meta = meta)
return {
"body": get_html(doc, print_format = print_format,
meta=meta, trigger_print = frappe.form_dict.trigger_print,
no_letterhead=frappe.form_dict.no_letterhead),
"css": get_print_style(frappe.form_dict.style, print_format),
"comment": frappe.session.user,
"title": doc.get(meta.title_field) if meta.title_field else doc.name
}
def get_print_format_doc(print_format_name, meta):
"""Returns print format document"""
if not print_format_name:
print_format_name = frappe.form_dict.format \
or meta.default_print_format or "Standard"
if print_format_name == "Standard":
return None
else:
try:
return frappe.get_doc("Print Format", print_format_name)
except frappe.DoesNotExistError:
# if old name, return standard!
return None
def get_html(doc, name=None, print_format=None, meta=None,
no_letterhead=None, trigger_print=False):
print_settings = frappe.db.get_singles_dict("Print Settings")
if isinstance(no_letterhead, basestring):
no_letterhead = cint(no_letterhead)
elif no_letterhead is None:
no_letterhead = not cint(print_settings.with_letterhead)
doc.flags.in_print = True
if not frappe.flags.ignore_print_permissions:
validate_print_permission(doc)
if doc.meta.is_submittable:
if doc.docstatus==0 and not print_settings.allow_print_for_draft:
frappe.throw(_("Not allowed to print draft documents"), frappe.PermissionError)
if doc.docstatus==2 and not print_settings.allow_print_for_cancelled:
frappe.throw(_("Not allowed to print cancelled documents"), frappe.PermissionError)
if hasattr(doc, "before_print"):
doc.before_print()
if not hasattr(doc, "print_heading"): doc.print_heading = None
if not hasattr(doc, "sub_heading"): doc.sub_heading = None
if not meta:
meta = frappe.get_meta(doc.doctype)
jenv = frappe.get_jenv()
format_data, format_data_map = [], {}
# determine template
if print_format:
if print_format.format_data:
# set format data
format_data = json.loads(print_format.format_data)
for df in format_data:
format_data_map[df.get("fieldname")] = df
if "visible_columns" in df:
for _df in df.get("visible_columns"):
format_data_map[_df.get("fieldname")] = _df
doc.format_data_map = format_data_map
template = "standard"
elif print_format.standard=="Yes" or print_format.custom_format:
template = jenv.from_string(get_print_format(doc.doctype,
print_format))
else:
# fallback
template = "standard"
else:
template = "standard"
if template == "standard":
template = jenv.get_template(standard_format)
letter_head = frappe._dict(get_letter_head(doc, no_letterhead) or {})
args = {
"doc": doc,
"meta": frappe.get_meta(doc.doctype),
"layout": make_layout(doc, meta, format_data),
"no_letterhead": no_letterhead,
"trigger_print": cint(trigger_print),
"letter_head": letter_head.content,
"footer": letter_head.footer,
"print_settings": frappe.get_doc("Print Settings")
}
html = template.render(args, filters={"len": len})
if cint(trigger_print):
html += trigger_print_script
return html
@frappe.whitelist()
def get_html_and_style(doc, name=None, print_format=None, meta=None,
no_letterhead=None, trigger_print=False):
"""Returns `html` and `style` of print format, used in PDF etc"""
if isinstance(doc, basestring) and isinstance(name, basestring):
doc = frappe.get_doc(doc, name)
if isinstance(doc, basestring):
doc = frappe.get_doc(json.loads(doc))
print_format = get_print_format_doc(print_format, meta=meta or frappe.get_meta(doc.doctype))
return {
"html": get_html(doc, name=name, print_format=print_format, meta=meta,
no_letterhead=no_letterhead, trigger_print=trigger_print),
"style": get_print_style(print_format=print_format)
}
def validate_print_permission(doc):
if frappe.form_dict.get("key"):
if frappe.form_dict.key == doc.get_signature():
return
for ptype in ("read", "print"):
if (not frappe.has_permission(doc.doctype, ptype, doc)
and not frappe.has_website_permission(doc.doctype, ptype, doc)):
raise frappe.PermissionError(_("No {0} permission").format(ptype))
def get_letter_head(doc, no_letterhead):
if no_letterhead:
return {}
if doc.get("letter_head"):
return frappe.db.get_value("Letter Head", doc.letter_head, ["content", "footer"], as_dict=True)
else:
return frappe.db.get_value("Letter Head", {"is_default": 1}, ["content", "footer"], as_dict=True) or {}
def get_print_format(doctype, print_format):
if print_format.disabled:
frappe.throw(_("Print Format {0} is disabled").format(print_format.name),
frappe.DoesNotExistError)
# server, find template
path = os.path.join(get_doc_path(frappe.db.get_value("DocType", doctype, "module"),
"Print Format", print_format.name), frappe.scrub(print_format.name) + ".html")
if os.path.exists(path):
with open(path, "r") as pffile:
return pffile.read()
else:
if print_format.html:
return print_format.html
else:
frappe.throw(_("No template found at path: {0}").format(path),
frappe.TemplateNotFoundError)
def make_layout(doc, meta, format_data=None):
"""Builds a hierarchical layout object from the fields list to be rendered
by `standard.html`
:param doc: Document to be rendered.
:param meta: Document meta object (doctype).
:param format_data: Fields sequence and properties defined by Print Format Builder."""
layout, page = [], []
layout.append(page)
if format_data:
# extract print_heading_template from the first field
# and remove the field
if format_data[0].get("fieldname") == "print_heading_template":
doc.print_heading_template = format_data[0].get("options")
format_data = format_data[1:]
for df in format_data or meta.fields:
if format_data:
# embellish df with original properties
df = frappe._dict(df)
if df.fieldname:
original = meta.get_field(df.fieldname)
if original:
newdf = original.as_dict()
newdf.update(df)
df = newdf
df.print_hide = 0
if df.fieldtype=="Section Break" or page==[]:
if len(page) > 1 and not any(page[-1]):
# truncate prev section if empty
del page[-1]
page.append([])
if df.fieldtype=="Column Break" or (page[-1]==[] and df.fieldtype!="Section Break"):
page[-1].append([])
if df.fieldtype=="HTML" and df.options:
doc.set(df.fieldname, True) # show this field
if is_visible(df, doc) and has_value(df, doc):
page[-1][-1].append(df)
# if table, add the row info in the field
# if a page break is found, create a new docfield
if df.fieldtype=="Table":
df.rows = []
df.start = 0
df.end = None
for i, row in enumerate(doc.get(df.fieldname)):
if row.get("page_break"):
# close the earlier row
df.end = i
# new page, with empty section and column
page = [[[]]]
layout.append(page)
# continue the table in a new page
df = copy.copy(df)
df.start = i
df.end = None
page[-1][-1].append(df)
return layout
def is_visible(df, doc):
"""Returns True if docfield is visible in print layout and does not have print_hide set."""
if df.fieldtype in ("Section Break", "Column Break", "Button"):
return False
if hasattr(doc, "hide_in_print_layout"):
if df.fieldname in doc.hide_in_print_layout:
return False
if df.permlevel > 0 and not doc.has_permlevel_access_to(df.fieldname, df):
return False
return not doc.is_print_hide(df.fieldname, df)
def has_value(df, doc):
value = doc.get(df.fieldname)
if value in (None, ""):
return False
elif isinstance(value, basestring) and not strip_html(value).strip():
return False
elif isinstance(value, list) and not len(value):
return False
return True
def get_print_style(style=None, print_format=None, for_legacy=False):
print_settings = frappe.get_doc("Print Settings")
if not style:
style = print_settings.print_style or "Standard"
context = {
"print_settings": print_settings,
"print_style": style,
"font": get_font(print_settings, print_format, for_legacy)
}
css = frappe.get_template("templates/styles/standard.css").render(context)
try:
css += frappe.get_template("templates/styles/" + style.lower() + ".css").render(context)
except TemplateNotFound:
pass
# move @import to top
for at_import in list(set(re.findall("(@import url\([^\)]+\)[;]?)", css))):
css = css.replace(at_import, "")
# prepend css with at_import
css = at_import + css
if print_format and print_format.css:
css += "\n\n" + print_format.css
return css
def get_font(print_settings, print_format=None, for_legacy=False):
default = '"Helvetica Neue", Helvetica, Arial, "Open Sans", sans-serif'
if for_legacy:
return default
font = None
if print_format:
if print_format.font and print_format.font!="Default":
font = '{0}, sans-serif'.format(print_format.font)
if not font:
if print_settings.font and print_settings.font!="Default":
font = '{0}, sans-serif'.format(print_settings.font)
else:
font = default
return font
def get_visible_columns(data, table_meta, df):
"""Returns list of visible columns based on print_hide and if all columns have value."""
columns = []
doc = data[0] or frappe.new_doc(df.options)
def add_column(col_df):
return is_visible(col_df, doc) \
and column_has_value(data, col_df.get("fieldname"))
if df.get("visible_columns"):
# columns specified by column builder
for col_df in df.get("visible_columns"):
# load default docfield properties
docfield = table_meta.get_field(col_df.get("fieldname"))
if not docfield:
continue
newdf = docfield.as_dict().copy()
newdf.update(col_df)
if add_column(newdf):
columns.append(newdf)
else:
for col_df in table_meta.fields:
if add_column(col_df):
columns.append(col_df)
return columns
def column_has_value(data, fieldname):
"""Check if at least one cell in column has non-zero and non-blank value"""
has_value = False
for row in data:
value = row.get(fieldname)
if value:
if isinstance(value, basestring):
if strip_html(value).strip():
has_value = True
break
else:
has_value = True
break
return has_value
trigger_print_script = """
<script>
window.print();
// close the window after print
// NOTE: doesn't close if print is cancelled in Chrome
setTimeout(function() {
window.close();
}, 1000);
</script>
"""
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 13:51:57 2015
@author: LLP-admin
"""
import os
from simple import *
from sklearn import svm
#classifiers
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import RFE
from sklearn.linear_model import RandomizedLogisticRegression;
from sklearn.linear_model import LogisticRegression
#feature selection
from sklearn.feature_selection import SelectKBest, f_regression
#scoring metric
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.cross_validation import cross_val_score
#encoding
from sklearn.preprocessing import LabelEncoder
#plotting
import matplotlib.pyplot as plt
import filters
def makeTestData1(filepath, maxTrainSess, nTestPerClass = 10):
"""
Given the filepath to the per-user data set, make a test data set to have
the last 'x' number of instances from each class.
Returns the dataFrame of the test data.
This is to be used with getReport1
"""
dict_df_class = divideByClass(filepath);
dict_trainPerClass = {}
test_set = pd.DataFrame();
for (className, df_class) in dict_df_class.iteritems():
length = len(df_class);
if (maxTrainSess < length - nTestPerClass):
print "WARNING: maxTrainSess can be too big for later training sessions."
print "It may result in overlapping data instances in training and testing."
upperbd = min(maxTrainSess, length-nTestPerClass-1)-1; #+1?
#Add to the new dictionary for train set per class (value), with key = className.
dict_trainPerClass[className] = df_class.iloc[range(0,upperbd)];
# print "printing test df_class:\n ", df_class.iloc[range(length-numPerClass, length)]
# print "updated to trian dic!"
#Create the fixed sized, global test_set.
test_set = test_set.append(df_class.iloc[range(length-nTestPerClass, length)], ignore_index = True);
# print "Finished appending to the test_set";
# print "Now the size of the test_set is: \n", test_set;
return dict_trainPerClass, test_set;
def getReport1(filepath, classifier, maxTrainSess, nTestPerClass = 10):
"""
Given the filepath,
"""
#Make the trainPerClass dictionary and the global test set.
dict_trainPerClass, test_set = makeTestData1(filepath, maxTrainSess, nTestPerClass);
sampleDF = dict_trainPerClass.values()[0];
columns = sampleDF.columns;
# assert (test_set.columns == columns);
#Initialize the classifier with parameters
classifier = classifier; #ToDo: parameter setting. getOtions?
#Initialize the record.
#Key = one-based index of the last run, Value = %accuracy of the classifier after the last training.
report = {};
#Prepare the test set.
rawTest_x, rawTest_y = splitXY(test_set);
#nInstances: we will choose the minimum
#print "list:" ,[len(trainPerClass) for trainPerClass in dict_trainPerClass.itervalues()]
n_tr= min([len(trainPerClass) for trainPerClass in dict_trainPerClass.itervalues()]);
# print "max number of training sessions possible is: ", n_tr;
uppbd = min(n_tr, maxTrainSess)
for j in range(1, uppbd + 1):
#Initialize the batch for training.
batch = pd.DataFrame(columns = columns);
for trainPerClass in dict_trainPerClass.itervalues():
batch = batch.append(trainPerClass.iloc[0:j]);
#split the batch into features and labels
batch_x, batch_y = splitXY(batch);
#Standardize the train data. Apply the mean and std parameter to scale the test data accordingly.
std_batch_x, std_test_x = piped_standardize(batch_x, rawTest_x);
#ToDo: Apply filters here
#Filter1: remove constant feature colms
# selectedCols = filters.notConstantCols(std_batch_x);
isNotConst = np.array((np.std(std_batch_x) != 0 ))
#Combine the filters
isSelectedCol = isNotConst#isRFE & isNotConst;
# print "size match?: ", len(isSelectedCol) == len(std_batch_x.columns)
######Don't need to modify even after new filters#################
#selectedCols is a list of column names that are selected.
selectedCols = [std_batch_x.columns[i] for (i,v) in enumerate(isSelectedCol) if v == 1]
# print 'Selected Cols: ', selectedCols
filtered_batch_x = std_batch_x[selectedCols];
filtered_test_x = std_test_x[selectedCols];
#For now
train_x = filtered_batch_x; train_y = batch_y;
test_x = filtered_test_x; test_y = rawTest_y;
#train_y and test_y's index must be in order
train_y.index = range(0, len(train_y));
test_y.index = range(0, len(test_y));
assert(len(train_x.columns) == len(test_x.columns));
#Don't need to modify even after more filtering is applied later
#train the classifier on this batch
classifier.fit(train_x, train_y);
#test the classifier on the fixed test set
score = classifier.score(test_x, test_y);
#record the accuracy (%)
report[j] = score;
# print 'report', report
return report
def anovaFS(test_x, test_y, nFeatures):
"""
Use cross-validation with nfolds < nsamples in test_x (i.e. nTestPerClass (defualt 10) * nClasses (eg 12))
Select best features based on ANOVA for svm.
"""
#1. Run SVM to get the feature ranking
anova_filter = SelectKBest(f_regression, k= nFeatures)
anova_filter.fit(test_x, test_y)
print 'selected features in boolean: \n', anova_filter.get_support()
print 'selected features in name: \n', test_x.columns[anova_filter.get_support()];
#2. Select the top nFeatures features
selectedCols = test_x.columns[anova_filter.get_support()]
#3. Run SVM (or any other) again on this selected features
return selectedCols
def evalthisFS(train_x, train_y, test_x, test_y, classifier, selectedCols):
if len(selectedCols) == 0:
score = 0.0
train_x = train_x[selectedCols];
test_x = test_x[selectedCols];
#Don't need to modify even after more filtering is applied later
#train the classifier on this batch
classifier.fit(train_x, train_y);
#test the classifier on the fixed test set
score = classifier.score(test_x, test_y);
return (selectedCols, score);
def getReport2(filepath, clf, maxTrainSess, nTestPerClass = 10, showReport = True):
dict_dataPerClass = divideByClass(filepath);
sampleDF = dict_dataPerClass.values()[0];
columns = sampleDF.columns
# np_dataPerClass = [np.array(dataPerClass) for dataPerClass in dict_dataPerClass.values()]
no_filter_report= {};
yes_filter_report = {};
#Initialize the batch for training.
for j in range(1, maxTrainSess +1):
#Clear out the test_set for each training session
test_set = pd.DataFrame(columns = columns);
batch = pd.DataFrame(columns = columns)
for dataPerClass in dict_dataPerClass.itervalues():
# assert( not(dataPerClass.isnull().any().any()) ) ; print 'No None in this class dataset!'
batch = batch.append(dataPerClass.iloc[0:j]);
#Now, need to prepare the test data set.
test_set = test_set.append( dataPerClass.iloc[j+1:j+nTestPerClass+1] )
#split the batch into features and labels
batch_x, batch_y = splitXY(batch)
rawTest_x, rawTest_y = splitXY(test_set)
#Done creating training and test data sets for this session.
#Standardize the train data. Apply the mean and std parameter to scale the test data accordingly.
std_batch_x, std_test_x = piped_standardize(batch_x, rawTest_x);
#Label encoding
# batch_y.index = range(0, len(batch_y))
le = LabelEncoder()
le.fit(batch_y);
encBatch_y = le.transform(batch_y)
encTest_y = le.transform(rawTest_y)
#ToDo: Apply filters here:
#ToDo: do feature selections independent of the learning method.
#Filter1: remove constant feature colms
# selectedCols = filters.notConstantCols(std_batch_x);
isNotConst = np.array((np.std(std_batch_x) != 0 ))
#Combine the filters
isSelectedCol = isNotConst
######Don't need to modify even after new filters#################
#selectedCols is a list of column names that are selected.
#selectedCols is the list of strings (i.e. selected column names)
selectedCols = [std_batch_x.columns[i] for (i,v) in enumerate(isSelectedCol) if v == 1]
# print 'Selected Cols: ', selectedCols
filtered_batch_x = std_batch_x[selectedCols];
filtered_test_x = std_test_x[selectedCols];
#For now
train_x = filtered_batch_x; train_y = encBatch_y
test_x = filtered_test_x; test_y = encTest_y
#Make sure there is no NaN in train_x, train_y, test_x, test_y
# assert( not(train_x.isnull().any().any()) ); print 'train-x pass!';
# assert( not(train_y.isnull().any().any()) ) ; print 'train-y pass!'
# assert( not(test_x.isnull().any().any()) ) ; print 'test-x pass!'
# assert( not(test_y.isnull().any().any()) ) ; print 'test-y pass!'
#Make sure the number of features in train_x and test_x are same
assert(len(train_x.columns) == len(test_x.columns));
#Don't need to modify even after more filtering is applied later
#train the classifier on this batch
classifier = clf
classifier.fit(train_x, train_y);
# print 'selected features: ', classifier.feature_importances_
#test the classifier on the fixed test set
score = classifier.score(test_x, test_y);
#record the accuracy (%)
no_filter_report[j] = score;
print 'nofilter score: ', score
if showReport:
pred_y = classifier.predict(test_x);
print '\n'+classification_report(test_y, pred_y)
# print 'f_1 score avgerage, macro: ', f1_score(test_y, pred_y, average='macro')
###########DO SVM Filtering######################################
###################################################################
###################################################################
selectedCols = anovaFS(train_x, train_y, nFeatures = 10)
newClassifier = clf#Restet to initial (blank) classifier
yes_filter_report[j] = evalthisFS(train_x, train_y,test_x, test_y, newClassifier, selectedCols )
print 'filter score :', yes_filter_report[j]
# print 'report', report
return no_filter_report,yes_filter_report
def perUserEvaluations(dirPath, classifier, classifierName, maxTrainSess, nTestPerClass = 10):
"""
Inputs:
1. dirPath: String path to the directory
2. classifier: sklearn classifier object
3. classifierName: String of the classifier's name
4. nTestPerClass: number of tests per class (total test set size is 10*number of class values).
(Default is fixed to ten)
Outputs:
1. Outputs graphs of the report, one graph per dataset in the directory.
2. Returns a list of peak %accuracy for each per-user data set.
"""
############Inside helper function#########################
###########################################################
def showSaveReport(report, toSave = True):
"""Given the report dictionary,
plot the graph with x-aix: number of training batch, y-axis %accuracy.
"""
# print classifier.name??
plt.figure()
plt.plot(report.keys(), report.values());
plt.title("Report on: "+ fname+ \
"\nClassifier: "+ clfName +\
"\nFirst Peak: " + str(p_accuracy) + " at " + str(p_idx) );
plt.xlim([0,17]);plt.xticks(np.arange(0, 17, 1.0));
plt.ylim([0,1.0]);plt.yticks(np.arange(0, 1.0, 0.1));
plt.xlabel("number of training batch")
plt.ylabel("% accuracy")
#If you want to save the graphs
if toSave:
#Set up output path
outDir = '..\\perUser_evaluations\\' + clfName+ '\\'
outName = fname +'.png'
outPath = path.join(outDir, outName)
#check if the outpath is already created.
try:
os.makedirs(outDir);
except OSError:
if not os.path.isdir(outDir):
raise
#Save and show the result
plt.savefig(outPath) #classifierName
plt.show();
plt.close();
return
########################################################
clfName = classifierName;
#Initializa peaks: a list of peak info for each per-user data set.
peaks = [];
#Run evaluation and graph report for each per-user data in the directory.
for i in range(0,6):
fname = "user_" + str(i)
filepath = dirPath + fname + ".csv";
#Get the report for the data set
report = getReport2(filepath, classifier, maxTrainSess, nTestPerClass = 10, showReport = False);
#Get peak information.
(p_idx, p_accuracy) = findFirstPeak(report);
peaks.append(p_accuracy);
#Graph the report
showSaveReport(report);
return peaks
#########################################################################################
##################################################################################
###################################################################################
####Test for runEvaluation###
###Initialize the classifier
##clf = KNeighborsClassifier(1);
#clf = svm.SVC(kernel="linear")
##clf = LogisticRegression
#clfName = 'svc_linear'
##clfName = 'RandomForest'
####classifier =DecisionTreeClassifier();
##classifier = RandomForestClassifier();
###################################to run
#dirPath = 'C:\\Users\\LLP-admin\\workspace\\weka\\token-experiment-dataset\\';
#fname = 'user_' + str(0)
#fmt = '.csv'
#filepath = dirPath + fname + fmt
#peaks = perUserEvaluations(dirPath, clf, clfName, maxTrainSess = 17);
#print "peak lists: ", peaks
##############################################
#for i in range(0,6):
# fname = 'features_' + str(i)
# fmt = '.csv'
# filepath = dirPath + fname + fmt
# report1 = getReport1(filepath, clf, maxTrainSess = 17, nTestPerClass = 10);
# report2 = getReport2(filepath, clf, maxTrainSess = 17, nTestPerClass = 10);
# plt.figure(0);
# plt.plot(report1.keys(), report1.values(), label='fixed testset' )
# plt.hold;
# plt.plot(report2.keys(), report2.values(), label = 'subseq. testset');
#
# plt.title(fname + '\n'+clfName)
# plt.legend(loc='best')
# length = max(len(report1), len(report2))
# plt.xlim([1,length]);plt.xticks(np.arange(1, length, 1.0));
#
# plt.ylim([0,1.0]);plt.yticks(np.arange(0, 1.0, 0.1));
#
#
# #Set up output path
# outDir = '..\\compareTestTypes\\' + '\\' + clfName + '\\'
# outName = fname+'_'+ clfName + '.png'
# outPath = outDir + outName
#
# #Save and show the result
# plt.savefig(outPath) #classifierName
# plt.show();
# plt.close()
#
#
##To test the 'showReport' edit to getReport2
##The printed report gives info about the classes (how correctly classified each of the members are).
#report, filterReport = getReport2(filepath, clf, maxTrainSess = 3, nTestPerClass = 10, showReport = True)
#print report.values()
#print [v[1] for v in filterReport.values()]
#
|
|
from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(object):
# Misc tests
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_zip_descr(self):
# Test zip_descr
(w, x, y, z) = self.data
# Std array
test = zip_descr((x, x), flatten=True)
assert_equal(test,
np.dtype([('', int), ('', int)]))
test = zip_descr((x, x), flatten=False)
assert_equal(test,
np.dtype([('', int), ('', int)]))
# Std & flexible-dtype
test = zip_descr((x, z), flatten=True)
assert_equal(test,
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
test = zip_descr((x, z), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('A', '|S3'), ('B', float)])]))
# Standard & nested dtype
test = zip_descr((x, w), flatten=True)
assert_equal(test,
np.dtype([('', int),
('a', int),
('ba', float), ('bb', int)]))
test = zip_descr((x, w), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('a', int),
('b', [('ba', float), ('bb', int)])])]))
def test_drop_fields(self):
# Test drop_fields
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# A basic field
test = drop_fields(a, 'a')
control = np.array([((2, 3.0),), ((5, 6.0),)],
dtype=[('b', [('ba', float), ('bb', int)])])
assert_equal(test, control)
# Another basic field (but nesting two fields)
test = drop_fields(a, 'b')
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# A nested sub-field
test = drop_fields(a, ['ba', ])
control = np.array([(1, (3.0,)), (4, (6.0,))],
dtype=[('a', int), ('b', [('bb', int)])])
assert_equal(test, control)
# All the nested sub-field from a field: zap that field
test = drop_fields(a, ['ba', 'bb'])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
test = drop_fields(a, ['a', 'b'])
assert_(test is None)
def test_rename_fields(self):
# Test rename fields
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
# Test get_names
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
def test_get_fieldstructure(self):
# Test get_fieldstructure
# No nested fields
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': []})
# One 1-nested field
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
# One 2-nested fields
ndtype = np.dtype([('A', int),
('B', [('BA', int),
('BB', [('BBA', int), ('BBB', int)])])])
test = get_fieldstructure(ndtype)
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 2]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='A', return_index=True)
control = [0, 1, 2, 3, 5]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='B', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BA', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BB', return_index=True)
control = [0, 1, 2, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
# Test the ignoremask option of find_duplicates
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_repack_fields(self):
dt = np.dtype('u1,f4,i8', align=True)
a = np.zeros(2, dtype=dt)
assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
assert_equal(repack_fields(a).itemsize, 13)
assert_equal(repack_fields(repack_fields(dt), align=True), dt)
# make sure type is preserved
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
def test_structured_to_unstructured(self):
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
out = structured_to_unstructured(a)
assert_equal(out, np.zeros((4,5), dtype='f8'))
b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
c = np.arange(20).reshape((4,5))
out = unstructured_to_structured(c, a.dtype)
want = np.array([( 0, ( 1., 2), [ 3., 4.]),
( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]),
(15, (16., 17), [18., 19.])],
dtype=[('a', 'i4'),
('b', [('f0', 'f4'), ('f1', 'u2')]),
('c', 'f4', (2,))])
assert_equal(out, want)
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
assert_equal(apply_along_fields(np.mean, d),
np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
np.array([ 3. , 5.5, 9. , 11. ]))
# check that for uniform field dtypes we get a view, not a copy:
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
arr = np.zeros(10, triangle)
res = structured_to_unstructured(arr, dtype=int)
assert_equal(res, np.zeros((10, 6), dtype=int))
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([(1,2), (3,4)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
# test nested fields
a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
newdt = [('a', [('c', 'u1')])]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([((2,),), ((3,),)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
# test unstructured code path for 0d arrays
a, b = np.array(3), np.array(0)
assign_fields_by_name(b, a)
assert_equal(b[()], 3)
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = np.array([(1, 10.), (2, 20.), (0, 0.)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
def test_masked_flexible(self):
# Test recursive_fill_fields on masked flexible-array
a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
dtype=[('A', int), ('B', float)])
b = ma.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
mask=[(0, 1), (1, 0), (0, 0)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
class TestMergeArrays(object):
# Test merge_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
[(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test merge_arrays on a single array.
(_, x, _, z) = self.data
test = merge_arrays(x)
control = np.array([(1,), (2,)], dtype=[('f0', int)])
assert_equal(test, control)
test = merge_arrays((x,))
assert_equal(test, control)
test = merge_arrays(z, flatten=False)
assert_equal(test, z)
test = merge_arrays(z, flatten=True)
assert_equal(test, z)
def test_solo_w_flatten(self):
# Test merge_arrays on a single array w & w/o flattening
w = self.data[0]
test = merge_arrays(w, flatten=False)
assert_equal(test, w)
test = merge_arrays(w, flatten=True)
control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
dtype=[('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
def test_standard(self):
# Test standard & standard
# Test merge arrays
(_, x, y, _) = self.data
test = merge_arrays((x, y), usemask=False)
control = np.array([(1, 10), (2, 20), (-1, 30)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, y), usemask=True)
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_flatten(self):
# Test standard & flexible
(_, x, _, z) = self.data
test = merge_arrays((x, z), flatten=True)
control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
test = merge_arrays((x, z), flatten=False)
control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
dtype=[('f0', int),
('f1', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
def test_flatten_wflexible(self):
# Test flatten standard & nested
(w, x, _, _) = self.data
test = merge_arrays((x, w), flatten=True)
control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
dtype=[('f0', int),
('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
('b', [('ba', float), ('bb', int)])])]
control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
dtype=controldtype)
assert_equal(test, control)
def test_wmasked_arrays(self):
# Test merge_arrays masked arrays
(_, x, _, _) = self.data
mx = ma.array([1, 2, 3], mask=[1, 0, 0])
test = merge_arrays((x, mx), usemask=True)
control = ma.array([(1, 1), (2, 2), (-1, 3)],
mask=[(0, 1), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, mx), usemask=True, asrecarray=True)
assert_equal(test, control)
assert_(isinstance(test, MaskedRecords))
def test_w_singlefield(self):
# Test single field
test = merge_arrays((np.array([1, 2]).view([('a', int)]),
np.array([10., 20., 30.])),)
control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('a', int), ('f1', float)])
assert_equal(test, control)
def test_w_shorter_flex(self):
# Test merge_arrays w/ a shorter flexndarray.
z = self.data[-1]
# Fixme, this test looks incomplete and broken
#test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
#control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
# dtype=[('A', '|S3'), ('B', float), ('C', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes warnings about unused variables
merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
dtype=[('A', '|S3'), ('B', float), ('C', int)])
def test_singlerecord(self):
(_, x, y, z) = self.data
test = merge_arrays((x[0], y[0], z[0]), usemask=False)
control = np.array([(1, 10, ('A', 1))],
dtype=[('f0', int),
('f1', int),
('f2', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
class TestAppendFields(object):
# Test append_fields
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_append_single(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, 'A', data=[10, 20, 30])
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('A', int)],)
assert_equal(test, control)
def test_append_double(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
dtype=[('f0', int), ('A', int), ('B', int)],)
assert_equal(test, control)
def test_append_on_flex(self):
# Test append_fields on flexible type arrays
z = self.data[-1]
test = append_fields(z, 'C', data=[10, 20, 30])
control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('C', int)],)
assert_equal(test, control)
def test_append_on_nested(self):
# Test append_fields on nested fields
w = self.data[0]
test = append_fields(w, 'C', data=[10, 20, 30])
control = ma.array([(1, (2, 3.0), 10),
(4, (5, 6.0), 20),
(-1, (-1, -1.), 30)],
mask=[(
0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
dtype=[('a', int),
('b', [('ba', float), ('bb', int)]),
('C', int)],)
assert_equal(test, control)
class TestStackArrays(object):
# Test stack_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test stack_arrays on single arrays
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
(_, x, y, _) = self.data
test = stack_arrays((x, x), usemask=False)
control = np.array([1, 2, 1, 2])
assert_equal(test, control)
test = stack_arrays((x, y), usemask=False)
control = np.array([1, 2, 10, 20, 30])
assert_equal(test, control)
test = stack_arrays((y, x), usemask=False)
control = np.array([10, 20, 30, 1, 2])
assert_equal(test, control)
def test_unnamed_and_named_fields(self):
# Test combination of arrays w/ & w/o named fields
(_, x, _, z) = self.data
test = stack_arrays((x, z))
control = ma.array([(1, -1, -1), (2, -1, -1),
(-1, 'A', 1), (-1, 'B', 2)],
mask=[(0, 1, 1), (0, 1, 1),
(1, 0, 0), (1, 0, 0)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
def test_matching_named_fields(self):
# Test combination of arrays w/ matching field names
(_, x, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
test = stack_arrays((z, zz))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, zz, x))
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
('a', 10., 100., -1), ('b', 20., 200., -1),
('c', 30., 300., -1),
(-1, -1, -1, 1), (-1, -1, -1, 2)],
dtype=ndtype,
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
(1, 1, 1, 0), (1, 1, 1, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_defaults(self):
# Test defaults: no exception raised if keys of defaults are not fields.
(_, _, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
test = stack_arrays((z, zz), defaults=defaults)
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_autoconversion(self):
# Tests autoconversion
adtype = [('A', int), ('B', bool), ('C', float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [('A', int), ('B', float), ('C', float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
with assert_raises(TypeError):
stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
test = stack_arrays((a, b))
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_subdtype(self):
z = np.array([
('A', 1), ('B', 2)
], dtype=[('A', '|S3'), ('B', float, (1,))])
zz = np.array([
('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
res = stack_arrays((z, zz))
expected = ma.array(
data=[
(b'A', [1.0], 0),
(b'B', [2.0], 0),
(b'a', [10.0], 100.0),
(b'b', [20.0], 200.0),
(b'c', [30.0], 300.0)],
mask=[
(False, [False], True),
(False, [False], True),
(False, [False], False),
(False, [False], False),
(False, [False], False)
],
dtype=zz.dtype
)
assert_equal(res.dtype, expected.dtype)
assert_equal(res, expected)
assert_equal(res.mask, expected.mask)
class TestJoinBy(object):
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_inner_join(self):
# Basic test of join_by
a, b = self.a, self.b
test = join_by('a', a, b, jointype='inner')
control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
(7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
(9, 59, 69, 109, 104)],
dtype=[('a', int), ('b1', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_join(self):
a, b = self.a, self.b
# Fixme, this test is broken
#test = join_by(('a', 'b'), a, b)
#control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
# (7, 57, 107, 102), (8, 58, 108, 103),
# (9, 59, 109, 104)],
# dtype=[('a', int), ('b', int),
# ('c', int), ('d', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes unused variable warnings
join_by(('a', 'b'), a, b)
np.array([(5, 55, 105, 100), (6, 56, 106, 101),
(7, 57, 107, 102), (8, 58, 108, 103),
(9, 59, 109, 104)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
def test_join_subdtype(self):
# tests the bug in https://stackoverflow.com/q/44769632/102441
from numpy.lib import recfunctions as rfn
foo = np.array([(1,)],
dtype=[('key', int)])
bar = np.array([(1, np.array([1,2,3]))],
dtype=[('key', int), ('value', 'uint16', 3)])
res = join_by('key', foo, bar)
assert_equal(res, bar.view(ma.MaskedArray))
def test_outer_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'outer')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(5, 65, -1, 100), (6, 56, 106, -1),
(6, 66, -1, 101), (7, 57, 107, -1),
(7, 67, -1, 102), (8, 58, 108, -1),
(8, 68, -1, 103), (9, 59, 109, -1),
(9, 69, -1, 104), (10, 70, -1, 105),
(11, 71, -1, 106), (12, 72, -1, 107),
(13, 73, -1, 108), (14, 74, -1, 109)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_leftouter_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'leftouter')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(6, 56, 106, -1), (7, 57, 107, -1),
(8, 58, 108, -1), (9, 59, 109, -1)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
assert_equal(test, control)
def test_different_field_order(self):
# gh-8940
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
# this should not give a FutureWarning:
j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
def test_duplicate_keys(self):
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
@pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
expected_dtype = np.dtype([
('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_same_name_different_dtypes(self):
# gh-9338
a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
expected_dtype = np.dtype([
('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_subarray_key(self):
a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
res = join_by('pos', a, b)
assert_equal(res.dtype, expected_dtype)
assert_equal(res, expected)
def test_padded_dtype(self):
dt = np.dtype('i1,f4', align=True)
dt.names = ('k', 'v')
assert_(len(dt.descr), 3) # padding field is inserted
a = np.array([(1, 3), (3, 2)], dt)
b = np.array([(1, 1), (2, 2)], dt)
res = join_by('k', a, b)
# no padding fields remain
expected_dtype = np.dtype([
('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
])
assert_equal(res.dtype, expected_dtype)
class TestJoinBy2(object):
@classmethod
def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_no_r1postfix(self):
# Basic test of join_by no_r1postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_no_postfix(self):
assert_raises(ValueError, join_by, 'a', self.a, self.b,
r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b1', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_two_keys_two_vars(self):
a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(50, 60), np.arange(10, 20))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(65, 75), np.arange(0, 10))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
(10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
(10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
(10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
(10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
dtype=[('k', int), ('a', int), ('b1', int),
('b2', int), ('c1', int), ('c2', int)])
test = join_by(
['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
obj = self.data['obj']
x = np.array([(obj, 1.), (obj, 2.)],
dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
|
|
from graphserver.core import Graph, TripBoard, HeadwayBoard, HeadwayAlight, Crossing, TripAlight, Timezone, Street, Link, ElapseTime
from optparse import OptionParser
from graphserver.graphdb import GraphDatabase
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase, parse_gtfs_date
import sys
import pytz
from tools import service_calendar_from_timezone
import datetime
def cons(ary):
for i in range(len(ary)-1):
yield (ary[i], ary[i+1])
class GTFSGraphCompiler:
def __init__(self, gtfsdb, agency_namespace, agency_id=None, reporter=None):
self.gtfsdb = gtfsdb
self.agency_namespace = agency_namespace
self.reporter = reporter
# get graphserver.core.Timezone and graphserver.core.ServiceCalendars from gtfsdb for agency with given agency_id
timezone_name = gtfsdb.agency_timezone_name(agency_id)
self.tz = Timezone.generate( timezone_name )
if reporter: reporter.write( "constructing service calendar for timezone '%s'\n"%timezone_name )
self.sc = service_calendar_from_timezone(gtfsdb, timezone_name )
def bundle_to_boardalight_edges(self, bundle, service_id):
"""takes a bundle and yields a bunch of edges"""
stop_time_bundles = bundle.stop_time_bundles(service_id)
n_trips = len(bundle.trip_ids)
# If there's less than two stations on this trip bundle, the trip bundle doesn't actually span two places
if len(stop_time_bundles)<2:
return
# If there are no stop_times in a bundle on this service day, there is nothing to load
if n_trips==0:
return
if self.reporter: self.reporter.write( "inserting %d trips with %d stop_time bundles on service_id '%s'\n"%(len(stop_time_bundles[0]),len(stop_time_bundles),service_id) )
#add board edges
for i, stop_time_bundle in enumerate(stop_time_bundles[:-1]):
trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled = stop_time_bundle[0]
if arrival_time != departure_time:
patternstop_vx_name = "psv-%s-%03d-%03d-%s-depart"%(self.agency_namespace,bundle.pattern.pattern_id,i,service_id)
# construct the board/alight/dwell triangle for this patternstop
patternstop_arrival_vx_name = "psv-%s-%03d-%03d-%s-arrive"%(self.agency_namespace,bundle.pattern.pattern_id,i,service_id)
dwell_crossing = Crossing()
for trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled in stop_time_bundle:
dwell_crossing.add_crossing_time( trip_id, departure_time-arrival_time )
yield (patternstop_arrival_vx_name,
patternstop_vx_name,
dwell_crossing)
else:
patternstop_vx_name = "psv-%s-%03d-%03d-%s"%(self.agency_namespace,bundle.pattern.pattern_id,i,service_id)
b = TripBoard(service_id, self.sc, self.tz, 0)
for trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled in stop_time_bundle:
b.add_boarding( trip_id, departure_time, stop_sequence )
yield ( "sta-%s"%stop_id, patternstop_vx_name, b )
#add alight edges
for i, stop_time_bundle in enumerate(stop_time_bundles[1:]):
trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled = stop_time_bundle[0]
if arrival_time != departure_time:
patternstop_vx_name = "psv-%s-%03d-%03d-%s-arrive"%(self.agency_namespace,bundle.pattern.pattern_id,i+1,service_id)
else:
patternstop_vx_name = "psv-%s-%03d-%03d-%s"%(self.agency_namespace,bundle.pattern.pattern_id,i+1,service_id)
al = TripAlight(service_id, self.sc, self.tz, 0)
for trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled in stop_time_bundle:
al.add_alighting( trip_id.encode('ascii'), arrival_time, stop_sequence )
yield ( patternstop_vx_name, "sta-%s"%stop_id, al )
# add crossing edges
for i, (from_stop_time_bundle, to_stop_time_bundle) in enumerate(cons(stop_time_bundles)):
trip_id, from_arrival_time, from_departure_time, stop_id, stop_sequence, stop_dist_traveled = from_stop_time_bundle[0]
trip_id, to_arrival_time, to_departure_time, stop_id, stop_sequence, stop_dist_traveled = to_stop_time_bundle[0]
if from_arrival_time!=from_departure_time:
from_patternstop_vx_name = "psv-%s-%03d-%03d-%s-depart"%(self.agency_namespace,bundle.pattern.pattern_id,i,service_id)
else:
from_patternstop_vx_name = "psv-%s-%03d-%03d-%s"%(self.agency_namespace,bundle.pattern.pattern_id,i,service_id)
if to_arrival_time!=to_departure_time:
to_patternstop_vx_name = "psv-%s-%03d-%03d-%s-arrive"%(self.agency_namespace,bundle.pattern.pattern_id,i+1,service_id)
else:
to_patternstop_vx_name = "psv-%s-%03d-%03d-%s"%(self.agency_namespace,bundle.pattern.pattern_id,i+1,service_id)
crossing = Crossing()
for i in range( len( from_stop_time_bundle ) ):
trip_id, from_arrival_time, from_departure_time, stop_id, stop_sequence, stop_dist_traveled = from_stop_time_bundle[i]
trip_id, to_arrival_time, to_departure_time, stop_id, stop_sequence, stop_dist_traveled = to_stop_time_bundle[i]
crossing.add_crossing_time( trip_id, (to_arrival_time-from_departure_time) )
yield ( from_patternstop_vx_name,
to_patternstop_vx_name,
crossing )
def gtfsdb_to_scheduled_edges(self, maxtrips=None, service_ids=None):
# compile trip bundles from gtfsdb
if self.reporter: self.reporter.write( "Compiling trip bundles...\n" )
bundles = self.gtfsdb.compile_trip_bundles(maxtrips=maxtrips, reporter=self.reporter)
# load bundles to graph
if self.reporter: self.reporter.write( "Loading trip bundles into graph...\n" )
n_bundles = len(bundles)
for i, bundle in enumerate(bundles):
if self.reporter: self.reporter.write( "%d/%d loading %s\n"%(i+1, n_bundles, bundle) )
for service_id in [x.encode("ascii") for x in self.gtfsdb.service_ids()]:
if service_ids is not None and service_id not in service_ids:
continue
for fromv_label, tov_label, edge in self.bundle_to_boardalight_edges(bundle, service_id):
yield fromv_label, tov_label, edge
def gtfsdb_to_headway_edges( self, maxtrips=None ):
# load headways
if self.reporter: self.reporter.write( "Loading headways trips to graph...\n" )
for trip_id, start_time, end_time, headway_secs in self.gtfsdb.execute( "SELECT * FROM frequencies" ):
service_id = list(self.gtfsdb.execute( "SELECT service_id FROM trips WHERE trip_id=?", (trip_id,) ))[0][0]
service_id = service_id.encode('utf-8')
hb = HeadwayBoard( service_id, self.sc, self.tz, 0, trip_id.encode('utf-8'), start_time, end_time, headway_secs )
ha = HeadwayAlight( service_id, self.sc, self.tz, 0, trip_id.encode('utf-8'), start_time, end_time, headway_secs )
stoptimes = list(self.gtfsdb.execute( "SELECT * FROM stop_times WHERE trip_id=? ORDER BY stop_sequence", (trip_id,)) )
#add board edges
for trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled in stoptimes[:-1]:
yield ( "sta-%s"%stop_id, "hwv-%s-%s-%s"%(self.agency_namespace,stop_id, trip_id), hb )
#add alight edges
for trip_id, arrival_time, departure_time, stop_id, stop_sequence, stop_dist_traveled in stoptimes[1:]:
yield ( "hwv-%s-%s-%s"%(self.agency_namespace,stop_id, trip_id), "sta-%s"%stop_id, ha )
#add crossing edges
for (trip_id1, arrival_time1, departure_time1, stop_id1, stop_sequence1, stop_dist_traveled1), (trip_id2, arrival_time2, departure_time2, stop_id2, stop_sequence2,stop_dist_traveled2) in cons(stoptimes):
cr = Crossing()
cr.add_crossing_time( trip_id1, (arrival_time2-departure_time1) )
yield ( "hwv-%s-%s-%s"%(self.agency_namespace,stop_id1, trip_id1), "hwv-%s-%s-%s"%(self.agency_namespace,stop_id2, trip_id2), cr )
def gtfsdb_to_transfer_edges( self ):
# load transfers
if self.reporter: self.reporter.write( "Loading transfers to graph...\n" )
# keep track to avoid redundancies
# this assumes that transfer relationships are bi-directional.
# TODO this implementation is also incomplete - it's theoretically possible that
# a transfers.txt table could contain "A,A,3,", which would mean you can't transfer
# at A.
seen = set([])
for stop_id1, stop_id2, conn_type, min_transfer_time in self.gtfsdb.execute( "SELECT * FROM transfers" ):
s1 = "sta-%s"%stop_id1
s2 = "sta-%s"%stop_id2
# TODO - what is the semantics of this? see note above
if s1 == s2:
continue
key = ".".join(sorted([s1,s2]))
if key not in seen:
seen.add(key)
else:
continue
assert conn_type == None or type(conn_type) == int
if conn_type in (0, None): # This is a recommended transfer point between two routes
if min_transfer_time in ("", None):
yield (s1, s2, Link())
yield (s2, s1, Link())
else:
yield (s1, s2, ElapseTime(int(min_transfer_time)))
yield (s2, s1, ElapseTime(int(min_transfer_time)))
elif conn_type == 1: # This is a timed transfer point between two routes
yield (s1, s2, Link())
yield (s2, s1, Link())
elif conn_type == 2: # This transfer requires a minimum amount of time
yield (s1, s2, ElapseTime(int(min_transfer_time)))
yield (s2, s1, ElapseTime(int(min_transfer_time)))
elif conn_type == 3: # Transfers are not possible between routes at this location.
print "WARNING: Support for no-transfer (transfers.txt transfer_type=3) not implemented."
def gtfsdb_to_edges( self, maxtrips=None, service_ids=None ):
for edge_tuple in self.gtfsdb_to_scheduled_edges(maxtrips, service_ids=service_ids):
yield edge_tuple
for edge_tuple in self.gtfsdb_to_headway_edges(maxtrips):
yield edge_tuple
for edge_tuple in self.gtfsdb_to_transfer_edges():
yield edge_tuple
def gdb_load_gtfsdb(gdb, agency_namespace, gtfsdb, cursor, agency_id=None, maxtrips=None, sample_date=None, reporter=sys.stdout):
# determine which service periods run on the given day, if a day is given
if sample_date is not None:
sample_date = datetime.date( *parse_gtfs_date( sample_date ) )
acceptable_service_ids = gtfsdb.service_periods( sample_date )
print "Importing only service periods operating on %s: %s"%(sample_date, acceptable_service_ids)
else:
acceptable_service_ids = None
compiler = GTFSGraphCompiler( gtfsdb, agency_namespace, agency_id, reporter )
c = gdb.get_cursor()
v_added = set([])
for fromv_label, tov_label, edge in compiler.gtfsdb_to_edges( maxtrips, service_ids=acceptable_service_ids ):
if fromv_label not in v_added:
gdb.add_vertex( fromv_label, c )
v_added.add(fromv_label)
if tov_label not in v_added:
gdb.add_vertex( tov_label, c )
v_added.add(tov_label)
gdb.add_edge( fromv_label, tov_label, edge, c )
gdb.commit()
def graph_load_gtfsdb( agency_namespace, gtfsdb, agency_id=None, maxtrips=None, reporter=sys.stdout ):
compiler = GTFSGraphCompiler( gtfsdb, agency_namespace, agency_id, reporter )
gg = Graph()
for fromv_label, tov_label, edge in compiler.gtfsdb_to_edges( maxtrips ):
gg.add_vertex( fromv_label )
gg.add_vertex( tov_label )
gg.add_edge( fromv_label, tov_label, edge )
return gg
def main():
usage = """usage: python gdb_import_gtfs.py [options] <graphdb_filename> <gtfsdb_filename> [<agency_id>]"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--namespace", dest="namespace", default="0",
help="agency namespace")
parser.add_option("-m", "--maxtrips", dest="maxtrips", default=None, help="maximum number of trips to load")
parser.add_option("-d", "--date", dest="sample_date", default=None, help="only load transit running on a given day. YYYYMMDD" )
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
exit(-1)
graphdb_filename = args[0]
gtfsdb_filename = args[1]
agency_id = args[2] if len(args)==3 else None
print "importing from gtfsdb '%s' into graphdb '%s'"%(gtfsdb_filename, graphdb_filename)
gtfsdb = GTFSDatabase( gtfsdb_filename )
gdb = GraphDatabase( graphdb_filename, overwrite=False )
maxtrips = int(options.maxtrips) if options.maxtrips else None
gdb_load_gtfsdb( gdb, options.namespace, gtfsdb, gdb.get_cursor(), agency_id, maxtrips=maxtrips, sample_date=options.sample_date)
gdb.commit()
print "done"
if __name__ == '__main__':
main()
|
|
from time import sleep
from lettuce import world, step
from datetime import datetime
from nose.tools import assert_false
from questionnaire.features.pages.home import HomePage
from questionnaire.features.pages.manage import ManageJrfPage
from questionnaire.models import Questionnaire, Section, Organization, Region, SubSection, Question, QuestionGroup
from questionnaire.tests.factories.questionnaire_factory import QuestionnaireFactory
from questionnaire.tests.factories.section_factory import SectionFactory
@step(u'I have four finalised questionnaires')
def given_i_have_four_finalised_questionnaires(step):
world.finalized_questionnaire_year = 2015
world.questionnaire1 = Questionnaire.objects.create(name="JRF Jamaica version", description="description",
year=2012, status=Questionnaire.FINALIZED)
Section.objects.create(title="School Based Section1", order=0, questionnaire=world.questionnaire1, name="Name")
world.questionnaire2 = Questionnaire.objects.create(name="JRF Brazil version", description="description",
year=2009, status=Questionnaire.FINALIZED)
Section.objects.create(title="School Section1", order=0, questionnaire=world.questionnaire2, name="Section1 name")
world.questionnaire3 = Questionnaire.objects.create(name="JRF Bolivia version", description="some more description",
year=2011, status=Questionnaire.FINALIZED)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire3, name="School Imm. Delivery")
world.questionnaire4 = Questionnaire.objects.create(name="JRF kampala version", description="description",
year=world.finalized_questionnaire_year, status=Questionnaire.FINALIZED)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire4, name="School Imm. Delivery")
@step(u'And I have two draft questionnaires for two years')
def and_i_have_two_draft_questionnaires_for_two_years(step):
world.questionnaire5 = Questionnaire.objects.create(name="JRF Bolivia version", description="some more description",
year=2013, status=Questionnaire.DRAFT)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire5, name="School Imm. Delivery")
world.questionnaire6 = Questionnaire.objects.create(name="JRF kampala version", description="description",
year=2013, status=Questionnaire.DRAFT)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire6, name="School Imm. Delivery")
@step(u'Then I should see manage JRF, users, question bank, extract links')
def then_i_should_see_manage_jrf_users_question_bank_extract_and_attachments_links(step):
world.page.is_text_present("HOME", "EXTRACT", "MANAGE JRF", "USERS", "QUESTIONS")
@step(u'Then I should see a list of the three most recent finalised questionnaires')
def then_i_should_see_a_list_of_the_three_most_recent_finalised_questionnaires(step):
world.page = HomePage(world.browser)
world.page.links_present_by_text(["%s %s" % (world.questionnaire1.name, world.questionnaire1.year),
"%s %s" % (world.questionnaire2.name, world.questionnaire2.year),
"%s %s" % (world.questionnaire3.name, world.questionnaire3.year)])
@step(u'And I should see a list of draft questionnaires')
def and_i_should_see_a_list_of_draft_questionnaires(step):
world.page.links_present_by_text(["%s %s" % (world.questionnaire5.name, world.questionnaire5.year),
"%s %s" % (world.questionnaire6.name, world.questionnaire6.year)])
assert world.page.is_element_present_by_id('id-edit-questionnaire-%s' % world.questionnaire5.id)
assert world.page.is_element_present_by_id('id-edit-questionnaire-%s' % world.questionnaire6.id)
assert world.page.is_element_present_by_id('id-finalize-%s' % world.questionnaire5.id)
assert world.page.is_element_present_by_id('id-finalize-%s' % world.questionnaire6.id)
@step(u'I visit the manage JRF page')
def and_i_visit_manage_jrf_page(step):
world.page.click_by_id('id-manage-jrf')
@step(u'And When I click Older')
def and_when_i_click_older(step):
world.page.click_by_id('id-older-jrf')
@step(u'Then I should also see the fourth finalised questionnaire')
def then_i_should_also_see_the_fourth_finalised_questionnaire(step):
world.page.links_present_by_text(["%s %s" % (world.questionnaire4.name, world.questionnaire4.year)])
@step(u'When I choose to create a new questionnaire')
def when_i_choose_to_create_a_new_questionnaire(step):
world.page.click_by_id('id-create-new')
@step(u'Then I should see options for selecting a finalized questionnaire and a reporting year')
def then_i_should_see_options_for_selecting_a_finalized_questionnaire_and_a_reporting_year(step):
world.page.is_text_present('Finalized Questionnaires')
world.page.is_text_present('Reporting Year')
assert world.page.is_element_present_by_id('id_questionnaire')
assert world.page.is_element_present_by_id('id_year')
@step(u'When I select a finalized questionnaire and a reporting year')
def when_i_select_a_finalized_questionnaire_and_a_reporting_year(step):
world.page.select('questionnaire', world.questionnaire1.id)
world.page.select('year', (datetime.now().year + 1))
@step(u'And I give it a new name')
def and_i_give_it_a_new_name(step):
world.page.fill_form({'name': 'Latest Questionnaire'})
@step(u'When I choose to duplicate the questionnaire')
def when_i_choose_to_duplicate_the_questionnaire(step):
world.page.click_by_id('duplicate_questionnaire_button')
@step(u'Then I should see a message that the questionnaire was duplicated successfully')
def then_i_should_see_a_message_that_the_questionnaire_was_duplicated_successfully(step):
world.page.is_element_present_by_css('.alert alert-success')
world.page.is_text_present('The questionnaire has been duplicated successfully, You can now go ahead and edit it')
@step(u'Then I should see the new questionnaire listed')
def then_i_should_see_the_new_questionnaire_listed(step):
world.latest_questionnaire = Questionnaire.objects.filter(status=Questionnaire.FINALIZED).latest('created')
assert world.page.is_element_present_by_id("questionnaire-%s" % world.latest_questionnaire.id)
@step(u'Then I should a validation error message')
def then_i_should_a_validation_error_message(step):
world.page.is_element_present_by_css('.error')
world.page.is_text_present('This field is required.')
@step(u'And I have draft and finalised core questionnaires')
def and_i_have_draft_and_finalised_core_questionnaires(step):
world.questionnaire1 = Questionnaire.objects.create(name="Questionnaire1", description="Section 1 Description",
year=2010, status=Questionnaire.FINALIZED)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire1, name="Section 1 Name")
world.questionnaire2 = Questionnaire.objects.create(name="Questionnaire2", description="Section 1 Description",
year=2011, status=Questionnaire.FINALIZED)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire2, name="Section 1 Name")
world.questionnaire3 = Questionnaire.objects.create(name="Questionnaire3", description="Section 1 Description",
year=2012, status=Questionnaire.DRAFT)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire3, name="Section 1 Name")
world.questionnaire4 = Questionnaire.objects.create(name="Questionnaire4", description="Section 1 Description",
year=2013, status=Questionnaire.DRAFT)
Section.objects.create(title="Section1", order=0, questionnaire=world.questionnaire4, name="Section 1 Name")
@step(u'Then I should see an option to lock each draft Core Questionnaire')
def then_i_should_see_an_option_to_lock_each_draft_core_questionnaire(step):
assert (world.page.is_element_present_by_id('id-finalize-%s' % world.questionnaire3.id))
assert (world.page.is_element_present_by_id('id-finalize-%s' % world.questionnaire4.id))
@step(u'And I should see an option to unlock each finalised Core Questionnaire')
def and_i_should_see_an_option_to_unlock_each_finalised_core_questionnaire(step):
assert (world.page.is_element_present_by_id('id-unfinalize-%s' % world.questionnaire1.id))
assert (world.page.is_element_present_by_id('id-unfinalize-%s' % world.questionnaire2.id))
@step(u'When I lock a draft Core Questionnaire')
def when_i_lock_a_draft_core_questionnaire(step):
world.page.click_by_id('id-finalize-%s' % world.questionnaire3.id)
@step(u'Then it should now have an option to unlock it')
def then_it_should_now_have_an_option_to_unlock_it(step):
world.page.click_by_id('id-unfinalize-%s' % world.questionnaire3.id)
@step(u'When I unlock a finalised Core Questionnaire')
def when_i_unlock_a_finalised_core_questionnaire(step):
world.page.click_by_id('id-unfinalize-%s' % world.questionnaire1.id)
@step(u'Then it should now have an option to lock it')
def then_it_should_now_have_an_option_to_lock_it(step):
world.page.click_by_id('id-finalize-%s' % world.questionnaire1.id)
@step(u'When I click on a Draft Core Questionnaire')
def when_i_click_on_a_draft_core_questionnaire(step):
world.page.click_by_id('questionnaire-%s' % world.questionnaire4.id)
@step(u'Then it should open in an edit view')
def then_it_should_open_in_an_edit_view(step):
world.page.is_text_present('New Section')
world.page.is_text_present('New Subsection')
@step(u'I click on a Finalised Core Questionnaire')
def when_i_click_on_a_finalised_core_questionnaire(step):
world.page.click_by_id('questionnaire-%s' % world.questionnaire2.id)
@step(u'Then it should open in a preview mode')
def then_it_should_open_in_a_preview_mode(step):
world.page.is_text_present('New Section', status=False)
world.page.is_text_present('Assign Question', status=False)
world.page.is_text_present('New Subsection', status=False)
@step(u'And I have two finalised questionnaires')
def and_i_have_two_finalised_questionnaires(step):
world.questionnaire7 = Questionnaire.objects.create(name="JRF Kampala", description="description",
year=2014, status=Questionnaire.FINALIZED)
Section.objects.create(title="School Based Section1", order=0, questionnaire=world.questionnaire7, name="Name")
world.questionnaire8 = Questionnaire.objects.create(name="JRF Brazil", description="description",
year=2015, status=Questionnaire.FINALIZED)
Section.objects.create(title="School Section1", order=0, questionnaire=world.questionnaire8, name="Section1 name")
world.org = Organization.objects.create(name="WHO")
world.afro = Region.objects.create(name="AFRO", organization=world.org)
world.amer = Region.objects.create(name="AMER", organization=world.org)
world.euro = Region.objects.create(name="EURO", organization=world.org)
world.asia = Region.objects.create(name="ASIA", organization=world.org)
@step(u'And I see finalized questionnaires')
def and_i_see_finalized_questionnaires(step):
world.page.links_present_by_text(["%s %s" % (world.questionnaire7.name, world.questionnaire7.year),
"%s %s" % (world.questionnaire8.name, world.questionnaire8.year)])
world.page.is_element_present_by_id('id-unfinalize-%s' % world.questionnaire8.id)
@step(u'Then I should see an option to send to regions on each of the finalized questionnaires')
def then_i_should_see_an_option_to_send_to_regions_on_each_of_the_finalized_questionnaires(step):
world.page.is_element_present_by_id('id-publish-questionnaire-%s' % world.questionnaire7.id)
world.page.is_element_present_by_id('id-publish-questionnaire-%s' % world.questionnaire8.id)
@step(u'When I choose option to send core questionnaire to regions')
def when_i_choose_option_to_send_core_questionnaire_to_regions(step):
world.page.click_by_id('id-publish-questionnaire-%s' % world.questionnaire7.id)
@step(u'Then I should see an interface to choose the regions to which to publish the finalised Core Questionnaire')
def then_i_should_see_an_interface_to_choose_the_regions_to_which_to_publish_the_finalised_core_questionnaire(step):
world.page.is_text_present("Publish Questionnaire : %s" % world.questionnaire7.name)
@step(u'And I should be able to select one region to which to publish the finalised Core Questionnaire')
def and_i_should_be_able_to_select_one_region_to_which_to_publish_the_finalised_core_questionnaire(step):
world.page.check("%s" % world.afro.id)
world.page.click_by_css('button.submit')
@step(u'And I select two regions to which to publish the finalised Core Questionnaire')
def and_i_select_two_regions_to_which_to_publish_the_finalised_core_questionnaire(step):
world.page.check(world.amer.id)
world.page.check(world.asia.id)
@step(u'When I click publish button')
def when_i_click_publish_button(step):
world.page.click_by_css('.submit')
@step(u'And I should be able to confirm that the Core Questionnaire is published to the regions I selected')
def and_i_should_be_able_to_confirm_that_the_core_questionnaire_is_published_to_the_regions_i_selected(step):
world.page.is_text_present("The questionnaire has been published to %s, %s" % (world.amer.name, world.asia.name))
world.page.is_text_present("%s" % world.amer.name)
world.page.is_text_present("%s" % world.asia.name)
@step(u'And I should be able to confirm that the regions to which I published the questionnaire is not on the list')
def and_i_should_be_able_to_confirm_that_the_regions_to_which_i_published_the_questionnaire_is_not_on_the_list(step):
world.page.click_by_id('id-publish-questionnaire-%s' % world.questionnaire7.id)
world.page.is_text_present("%s" % world.amer.name, status=False)
world.page.is_text_present("%s" % world.asia.name, status=False)
@step(u'And I have a finalised regional questionnaire')
def and_i_have_a_finalised_regional_questionnaire(step):
world.finalised_regional_questionnaire = Questionnaire.objects.create(name="JRF Finalised Regional",
description="Description",
year=2014, status=Questionnaire.FINALIZED,
region=world.region_afro)
world.regional_section = Section.objects.create(order=0, title="Section AFRO", description="Description",
questionnaire=world.finalised_regional_questionnaire,
name="Cover page")
world.regional_subsection = SubSection.objects.create(order=1, section=world.regional_section)
world.regional_question1 = Question.objects.create(text='Name of person in Ministry of Health', UID='C001',
answer_type='Text')
parent = QuestionGroup.objects.create(subsection=world.regional_subsection, order=1)
parent.question.add(world.regional_question1)
@step(u'When I click that regional questionnaire')
def when_i_click_that_regional_questionnaire(step):
world.page.click_by_id('questionnaire-%s' % world.finalised_regional_questionnaire.id)
@step(u'When I select to approve the regional questionnaire')
def when_i_select_to_approve_the_regional_questionnaire(step):
world.page.click_by_id('id-approve-questionnaire-%s' % world.finalised_regional_questionnaire.id)
@step(u'Then I should see a confirmation prompt to approve the questionnaire')
def then_i_should_see_a_confirmation_prompt_to_approve_the_questionnaire(step):
world.page.is_text_present('Confirm Questionnaire Acceptance')
world.page.is_text_present('Are you sure you want to accept this questionnaire?')
@step(u'When I confirm the questionnaire approval')
def when_i_confirm_the_questionnaire_approval(step):
world.page.click_by_id('confirm-accept-questionnaire-%s' % world.finalised_regional_questionnaire.id)
@step(u'Then I should see a message that the questionnaire was approved')
def then_i_should_see_a_message_that_the_questionnaire_was_approved(step):
world.page.is_text_present('The questionnaire has been accepted successfully.')
@step(u'And I should see a new status indicating that the questionnaire was approved')
def and_i_should_see_a_new_status_indicating_that_the_questionnaire_was_approved(step):
world.page.is_text_present('Published')
@step(u'And there should no longer be an option to approve the questionnaire')
def and_there_should_no_longer_be_an_option_to_approve_the_questionnaire(step):
assert world.page.is_element_not_present_by_id(
"id-approve-questionnaire-%s" % world.finalised_regional_questionnaire.id)
@step(u'Then I should see modal with the questionnaires current name')
def then_i_should_see_modal_with_the_questionnaires_current_name(step):
world.page.is_text_present('Edit Name Of Questionnaire')
world.page.is_text_present(world.questionnaire5.name)
@step(u'When I update the name of the questionnaire and save my changes')
def when_i_update_the_name_of_the_questionnaire_and_save_my_changes(step):
world.page.fill_this_element('id_name', 'Updated Questionnaire Name')
world.page.select('year', 2014)
world.page.click_by_id('save-questionnaire-name-%s' % world.questionnaire5.id)
@step(u'Then I should see a message that questionnaire was updated')
def then_i_should_see_a_message_that_questionnaire_was_updated(step):
world.page.is_text_present('The revision was updated successfully.')
@step(u'And I should see the questionnaire with its new name')
def and_i_should_see_the_questionnaire_with_its_new_name(step):
sleep(10)
world.page.is_text_present('Updated Questionnaire Name')
@step(u'Then I should view it in preview mode')
def then_i_should_view_it_in_preview_mode(step):
assert_false(world.page.is_element_present_by_id('id-edit-section-%s' % world.finalised_section.id))
assert_false(world.page.is_element_present_by_id('id-delete-section-%s' % world.finalised_section.id))
@step(u'And I have "([^"]*)" "([^"]*)" core questionnaire')
def and_i_have_group1_group2_core_questionnaire(step, number, status):
create_questionnaire(world, number, status)
@step(u'When I click "([^"]*)" button on that core questionnaire')
def when_i_click_group1_button_on_that_core_questionnaire(step, action):
world.page.click_by_id('id-%s-questionnaire-%s' % (action, world.finalised_core_questionnaire.id))
@step(u'When I confirm "([^"]*)" the questionnaire')
def when_i_confirm_group1_the_questionnaire(step, action):
world.page.click_by_id('confirm-%s-questionnaire-%s' % (action, world.finalised_core_questionnaire.id))
sleep(1)
@step(u'Then I should see the questionnaire "([^"]*)"')
def then_i_should_see_the_questionnaire_group1(step, status):
sleep(2)
world.page.is_text_present(
'The questionnaire \'%s\' was %s successfully.' % (world.finalised_core_questionnaire.name, status))
@step(u'When I click on the "([^"]*)" questionnaire')
def when_i_click_on_the_group1_questionnaire(step, status):
world.page.click_by_id('%s-questionnaire-%s' % (status, world.finalised_core_questionnaire.id))
def create_questionnaire(world, number, status):
for i in range(int(number)):
world.finalised_core_questionnaire = Questionnaire.objects.create(
name="JRF %s Regional %d" % (status.capitalize(), i),
description="Description",
year='201%d' % i, status=status,
region=None)
world.finalised_section = SectionFactory(questionnaire=world.finalised_core_questionnaire)
world.finalized_subsection = SubSection.objects.create(order=1, section=world.finalised_section)
world.refinalized_question1 = Question.objects.create(text='Name of person in Ministry of Health',
UID='C001%s' % i,
answer_type='Text')
parent = QuestionGroup.objects.create(subsection=world.finalized_subsection, order=i)
parent.question.add(world.refinalized_question1)
@step(u'When I click the edit questionnaire button')
def when_i_click_the_edit_questionnaire_button(step):
world.page.click_by_id('id-edit-questionnaire-%s' % world.questionnaire5.id)
sleep(2)
@step(u'I update the year to one of the finalized questionnaire year')
def and_i_update_the_year_to_2014(step):
world.page.select('year', world.finalized_questionnaire_year)
@step(u'Then I should see a warning message')
def then_i_should_see_a_warning_message(step):
warning_message = 'A Revision of the year %s already exists. If you go ahead, that revision will be archived.' % world.finalized_questionnaire_year
world.page.is_text_present(warning_message)
@step(u'When I save my changes')
def when_i_save_my_changes(step):
world.page.click_by_id('save-questionnaire-name-%s' % world.questionnaire5.id)
@step(u'I should see the corresponding existing finalized questionnaire is archived')
def then_i_should_see_the_existing_2012_questionnaire_is_archived(step):
world.page.is_element_present_by_id('archived-questionnaire-%s' % world.questionnaire4.id)
|
|
"""Component for interacting with a Lutron Caseta system."""
from __future__ import annotations
import asyncio
import contextlib
import logging
import ssl
import async_timeout
from pylutron_caseta import BUTTON_STATUS_PRESSED
from pylutron_caseta.smartbridge import Smartbridge
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import (
ACTION_PRESS,
ACTION_RELEASE,
ATTR_ACTION,
ATTR_AREA_NAME,
ATTR_BUTTON_NUMBER,
ATTR_DEVICE_NAME,
ATTR_LEAP_BUTTON_NUMBER,
ATTR_SERIAL,
ATTR_TYPE,
BRIDGE_DEVICE,
BRIDGE_DEVICE_ID,
BRIDGE_LEAP,
BRIDGE_TIMEOUT,
BUTTON_DEVICES,
CONF_CA_CERTS,
CONF_CERTFILE,
CONF_KEYFILE,
DOMAIN,
LUTRON_CASETA_BUTTON_EVENT,
MANUFACTURER,
)
from .device_trigger import (
DEVICE_TYPE_SUBTYPE_MAP_TO_LIP,
LEAP_TO_DEVICE_TYPE_SUBTYPE_MAP,
)
_LOGGER = logging.getLogger(__name__)
DATA_BRIDGE_CONFIG = "lutron_caseta_bridges"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_KEYFILE): cv.string,
vol.Required(CONF_CERTFILE): cv.string,
vol.Required(CONF_CA_CERTS): cv.string,
}
],
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SCENE,
Platform.SWITCH,
]
async def async_setup(hass, base_config):
"""Set up the Lutron component."""
hass.data.setdefault(DOMAIN, {})
if DOMAIN in base_config:
bridge_configs = base_config[DOMAIN]
for config in bridge_configs:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
# extract the config keys one-by-one just to be explicit
data={
CONF_HOST: config[CONF_HOST],
CONF_KEYFILE: config[CONF_KEYFILE],
CONF_CERTFILE: config[CONF_CERTFILE],
CONF_CA_CERTS: config[CONF_CA_CERTS],
},
)
)
return True
async def async_setup_entry(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
"""Set up a bridge from a config entry."""
entry_id = config_entry.entry_id
host = config_entry.data[CONF_HOST]
keyfile = hass.config.path(config_entry.data[CONF_KEYFILE])
certfile = hass.config.path(config_entry.data[CONF_CERTFILE])
ca_certs = hass.config.path(config_entry.data[CONF_CA_CERTS])
bridge = None
try:
bridge = Smartbridge.create_tls(
hostname=host, keyfile=keyfile, certfile=certfile, ca_certs=ca_certs
)
except ssl.SSLError:
_LOGGER.error("Invalid certificate used to connect to bridge at %s", host)
return False
timed_out = True
with contextlib.suppress(asyncio.TimeoutError):
async with async_timeout.timeout(BRIDGE_TIMEOUT):
await bridge.connect()
timed_out = False
if timed_out or not bridge.is_connected():
await bridge.close()
if timed_out:
raise ConfigEntryNotReady(f"Timed out while trying to connect to {host}")
if not bridge.is_connected():
raise ConfigEntryNotReady(f"Cannot connect to {host}")
_LOGGER.debug("Connected to Lutron Caseta bridge via LEAP at %s", host)
devices = bridge.get_devices()
bridge_device = devices[BRIDGE_DEVICE_ID]
buttons = bridge.buttons
_async_register_bridge_device(hass, entry_id, bridge_device)
button_devices = _async_register_button_devices(
hass, entry_id, bridge_device, buttons
)
_async_subscribe_pico_remote_events(hass, bridge, buttons)
# Store this bridge (keyed by entry_id) so it can be retrieved by the
# platforms we're setting up.
hass.data[DOMAIN][entry_id] = {
BRIDGE_LEAP: bridge,
BRIDGE_DEVICE: bridge_device,
BUTTON_DEVICES: button_devices,
}
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
@callback
def _async_register_bridge_device(
hass: HomeAssistant, config_entry_id: str, bridge_device: dict
) -> None:
"""Register the bridge device in the device registry."""
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
name=bridge_device["name"],
manufacturer=MANUFACTURER,
config_entry_id=config_entry_id,
identifiers={(DOMAIN, bridge_device["serial"])},
model=f"{bridge_device['model']} ({bridge_device['type']})",
configuration_url="https://device-login.lutron.com",
)
@callback
def _async_register_button_devices(
hass: HomeAssistant,
config_entry_id: str,
bridge_device,
button_devices_by_id: dict[int, dict],
) -> dict[str, dr.DeviceEntry]:
"""Register button devices (Pico Remotes) in the device registry."""
device_registry = dr.async_get(hass)
button_devices_by_dr_id = {}
seen = set()
for device in button_devices_by_id.values():
if "serial" not in device or device["serial"] in seen:
continue
seen.add(device["serial"])
dr_device = device_registry.async_get_or_create(
name=device["name"],
suggested_area=device["name"].split("_")[0],
manufacturer=MANUFACTURER,
config_entry_id=config_entry_id,
identifiers={(DOMAIN, device["serial"])},
model=f"{device['model']} ({device['type']})",
via_device=(DOMAIN, bridge_device["serial"]),
)
button_devices_by_dr_id[dr_device.id] = device
return button_devices_by_dr_id
@callback
def _async_subscribe_pico_remote_events(
hass: HomeAssistant,
bridge_device: Smartbridge,
button_devices_by_id: dict[int, dict],
):
"""Subscribe to lutron events."""
@callback
def _async_button_event(button_id, event_type):
device = button_devices_by_id.get(button_id)
if not device:
return
if event_type == BUTTON_STATUS_PRESSED:
action = ACTION_PRESS
else:
action = ACTION_RELEASE
type_ = device["type"]
name = device["name"]
button_number = device["button_number"]
# The original implementation used LIP instead of LEAP
# so we need to convert the button number to maintain compat
sub_type_to_lip_button = DEVICE_TYPE_SUBTYPE_MAP_TO_LIP[type_]
leap_button_to_sub_type = LEAP_TO_DEVICE_TYPE_SUBTYPE_MAP[type_]
if (sub_type := leap_button_to_sub_type.get(button_number)) is None:
_LOGGER.error(
"Unknown LEAP button number %s is not in %s for %s (%s)",
button_number,
leap_button_to_sub_type,
name,
type_,
)
return
lip_button_number = sub_type_to_lip_button[sub_type]
hass.bus.async_fire(
LUTRON_CASETA_BUTTON_EVENT,
{
ATTR_SERIAL: device["serial"],
ATTR_TYPE: type_,
ATTR_BUTTON_NUMBER: lip_button_number,
ATTR_LEAP_BUTTON_NUMBER: button_number,
ATTR_DEVICE_NAME: name,
ATTR_AREA_NAME: name.split("_")[0],
ATTR_ACTION: action,
},
)
for button_id in button_devices_by_id:
bridge_device.add_button_subscriber(
str(button_id),
lambda event_type, button_id=button_id: _async_button_event(
button_id, event_type
),
)
async def async_unload_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Unload the bridge bridge from a config entry."""
data = hass.data[DOMAIN][entry.entry_id]
smartbridge: Smartbridge = data[BRIDGE_LEAP]
await smartbridge.close()
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class LutronCasetaDevice(Entity):
"""Common base class for all Lutron Caseta devices."""
def __init__(self, device, bridge, bridge_device):
"""Set up the base class.
[:param]device the device metadata
[:param]bridge the smartbridge object
[:param]bridge_device a dict with the details of the bridge
"""
self._device = device
self._smartbridge = bridge
self._bridge_device = bridge_device
async def async_added_to_hass(self):
"""Register callbacks."""
self._smartbridge.add_subscriber(self.device_id, self.async_write_ha_state)
@property
def device_id(self):
"""Return the device ID used for calling pylutron_caseta."""
return self._device["device_id"]
@property
def name(self):
"""Return the name of the device."""
return self._device["name"]
@property
def serial(self):
"""Return the serial number of the device."""
return self._device["serial"]
@property
def unique_id(self):
"""Return the unique ID of the device (serial)."""
return str(self.serial)
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={(DOMAIN, self.serial)},
manufacturer=MANUFACTURER,
model=f"{self._device['model']} ({self._device['type']})",
name=self.name,
suggested_area=self._device["name"].split("_")[0],
via_device=(DOMAIN, self._bridge_device["serial"]),
configuration_url="https://device-login.lutron.com",
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {"device_id": self.device_id, "zone_id": self._device["zone"]}
@property
def should_poll(self):
"""No polling needed."""
return False
|
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Daniel Strohmeier <daniel.strohmeier@gmail.com>
#
# License: Simplified BSD
import numpy as np
from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference, _log_exp_var)
from ..forward import is_fixed_orient
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import (logger, verbose, _check_depth, _check_option, sum_squared,
_validate_type, check_random_state, warn)
from ..dipole import Dipole
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
tf_mixed_norm_solver, iterative_tf_mixed_norm_solver,
norm_l2inf, norm_epsilon_inf, groups_norm2)
def _check_ori(pick_ori, forward):
"""Check pick_ori."""
_check_option('pick_ori', pick_ori, [None, 'vector'])
if pick_ori == 'vector' and is_fixed_orient(forward):
raise ValueError('pick_ori="vector" cannot be combined with a fixed '
'orientation forward solution.')
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
mask = None
if isinstance(weights, _BaseSourceEstimate):
weights = np.max(np.abs(weights.data), axis=1)
weights_max = np.max(weights)
if weights_min > weights_max:
raise ValueError('weights_min > weights_max (%s > %s)' %
(weights_min, weights_max))
weights_min = weights_min / weights_max
weights = weights / weights_max
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
if len(weights) != gain.shape[1]:
raise ValueError('weights do not have the correct dimension '
' (%d != %d)' % (len(weights), gain.shape[1]))
if len(source_weighting.shape) == 1:
source_weighting *= weights
else:
source_weighting *= weights[:, None]
gain *= weights[None, :]
if weights_min is not None:
mask = (weights > weights_min)
gain = gain[:, mask]
n_sources = np.sum(mask) // n_dip_per_pos
logger.info("Reducing source space to %d sources" % n_sources)
return gain, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank,
weights=None, weights_min=None):
depth = _check_depth(depth, 'depth_sparse')
forward, gain_info, gain, _, _, source_weighting, _, _, whitener = \
_prepare_forward(forward, info, noise_cov, 'auto', loose, rank, pca,
use_cps=True, **depth)
if weights is None:
mask = None
else:
gain, source_weighting, mask = _prepare_weights(
forward, gain, source_weighting, weights, weights_min)
return forward, gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
# OK, picking based on row_names is safe
sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
residual = evoked.copy()
residual = pick_channels_evoked(residual, include=info['ch_names'])
r_tmp = residual.copy()
r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
# Take care of proj
active_projs = list()
non_active_projs = list()
for p in evoked.info['projs']:
if p['active']:
active_projs.append(p)
else:
non_active_projs.append(p)
if len(active_projs) > 0:
r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True,
verbose=False)
r_tmp.apply_proj(verbose=False)
r_tmp.add_proj(non_active_projs, remove_existing=False, verbose=False)
residual.data -= r_tmp.data
return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
active_is_idx=False, pick_ori=None, verbose=None):
source_nn = forward['source_nn']
vector = False
if not is_fixed_orient(forward):
if pick_ori != 'vector':
logger.info('combining the current components...')
X = combine_xyz(X)
else:
vector = True
source_nn = np.reshape(source_nn, (-1, 3, 3))
if not active_is_idx:
active_idx = np.where(active_set)[0]
else:
active_idx = active_set
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if n_dip_per_pos > 1:
active_idx = np.unique(active_idx // n_dip_per_pos)
src = forward['src']
vertices = []
n_points_so_far = 0
for this_src in src:
this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
this_active_idx = active_idx[(n_points_so_far <= active_idx) &
(active_idx < this_n_points_so_far)]
this_active_idx -= n_points_so_far
this_vertno = this_src['vertno'][this_active_idx]
n_points_so_far = this_n_points_so_far
vertices.append(this_vertno)
source_nn = source_nn[active_idx]
return _make_stc(
X, vertices, src.kind, tmin, tstep, src[0]['subject_his_id'],
vector=vector, source_nn=source_nn)
def _split_gof(M, X, gain):
# parse out the variance explained using an orthogonal basis
# assuming x is estimated using elements of gain, with residual res
# along the first axis
assert M.ndim == X.ndim == gain.ndim == 2, (M.ndim, X.ndim, gain.ndim)
assert gain.shape == (M.shape[0], X.shape[0])
assert M.shape[1] == X.shape[1]
norm = (M * M.conj()).real.sum(0, keepdims=True)
norm[norm == 0] = np.inf
M_est = gain @ X
assert M.shape == M_est.shape
res = M - M_est
assert gain.shape[0] == M.shape[0], (gain.shape, M.shape)
# find an orthonormal basis for our matrices that spans the actual data
U, s, _ = np.linalg.svd(gain, full_matrices=False)
U = U[:, s >= s[0] * 1e-6]
# the part that gets explained
fit_orth = U.T @ M
# the part that got over-explained (landed in residual)
res_orth = U.T @ res
# determine the weights by projecting each one onto this basis
w = (U.T @ gain)[:, :, np.newaxis] * X
w_norm = np.linalg.norm(w, axis=1, keepdims=True)
w_norm[w_norm == 0] = 1.
w /= w_norm
# our weights are now unit-norm positive (will presrve power)
fit_back = np.linalg.norm(fit_orth[:, np.newaxis] * w, axis=0) ** 2
res_back = np.linalg.norm(res_orth[:, np.newaxis] * w, axis=0) ** 2
# and the resulting goodness of fits
gof_back = 100 * (fit_back - res_back) / norm
assert gof_back.shape == X.shape, (gof_back.shape, X.shape)
return gof_back
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M,
gain_active, active_is_idx=False,
verbose=None):
times = tmin + tstep * np.arange(X.shape[1])
if not active_is_idx:
active_idx = np.where(active_set)[0]
else:
active_idx = active_set
# Compute the GOF split amongst the dipoles
assert M.shape == (gain_active.shape[0], len(times))
assert gain_active.shape[1] == len(active_idx) == X.shape[0]
gof_split = _split_gof(M, X, gain_active)
assert gof_split.shape == (len(active_idx), len(times))
assert X.shape[0] in (len(active_idx), 3 * len(active_idx))
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if n_dip_per_pos > 1:
active_idx = active_idx // n_dip_per_pos
_, keep = np.unique(active_idx, return_index=True)
keep.sort() # maintain old order
active_idx = active_idx[keep]
gof_split.shape = (len(active_idx), n_dip_per_pos, len(times))
gof_split = gof_split.sum(1)
assert (gof_split < 100).all()
assert gof_split.shape == (len(active_idx), len(times))
dipoles = []
for k, i_dip in enumerate(active_idx):
i_pos = forward['source_rr'][i_dip][np.newaxis, :]
i_pos = i_pos.repeat(len(times), axis=0)
X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
if n_dip_per_pos == 1:
amplitude = X_[0]
i_ori = forward['source_nn'][i_dip][np.newaxis, :]
i_ori = i_ori.repeat(len(times), axis=0)
else:
if forward['surf_ori']:
X_ = np.dot(forward['source_nn'][
i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
amplitude = np.linalg.norm(X_, axis=0)
i_ori = np.zeros((len(times), 3))
i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
amplitude[amplitude > 0.]).T
dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof_split[k]))
return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
"""Convert a list of spatio-temporal dipoles into a SourceEstimate.
Parameters
----------
dipoles : Dipole | list of instances of Dipole
The dipoles to convert.
src : instance of SourceSpaces
The source space used to generate the forward operator.
%(verbose)s
Returns
-------
stc : SourceEstimate
The source estimate.
"""
logger.info('Converting dipoles into a SourceEstimate.')
if isinstance(dipoles, Dipole):
dipoles = [dipoles]
if not isinstance(dipoles, list):
raise ValueError('Dipoles must be an instance of Dipole or '
'a list of instances of Dipole. '
'Got %s!' % type(dipoles))
tmin = dipoles[0].times[0]
tstep = dipoles[0].times[1] - tmin
X = np.zeros((len(dipoles), len(dipoles[0].times)))
source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
axis=0)
n_lh_points = len(src[0]['vertno'])
lh_vertno = list()
rh_vertno = list()
for i in range(len(dipoles)):
if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
raise ValueError('Only dipoles with fixed position over time '
'are supported!')
X[i] = dipoles[i].amplitude
idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
idx = np.where(idx)[0][0]
if idx < n_lh_points:
lh_vertno.append(src[0]['vertno'][idx])
else:
rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
vertices = [np.array(lh_vertno).astype(int),
np.array(rh_vertno).astype(int)]
stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
subject=src._subject)
logger.info('[done]')
return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto',
depth=0.8, maxit=3000, tol=1e-4, active_set_size=10,
debias=True, time_pca=True, weights=None, weights_min=0.,
solver='auto', n_mxne_iter=1, return_residual=False,
return_as_dipoles=False, dgap_freq=10, rank=None, pick_ori=None,
sure_alpha_grid="auto", random_state=None, verbose=None):
"""Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).
Compute L1/L2 mixed-norm solution :footcite:`GramfortEtAl2012` or L0.5/L2
:footcite:`StrohmeierEtAl2016` mixed-norm solution on evoked data.
Parameters
----------
evoked : instance of Evoked or list of instances of Evoked
Evoked data to invert.
forward : dict
Forward operator.
noise_cov : instance of Covariance
Noise covariance to compute whitener.
alpha : float | str
Regularization parameter. If float it should be in the range [0, 100):
0 means no regularization, 100 would give 0 active dipole.
If ``'sure'`` (default), the SURE method from
:footcite:`DeledalleEtAl2014` will be used.
.. versionchanged:: 0.24
The default was changed to ``'sure'``.
%(loose)s
%(depth)s
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter.
active_set_size : int | None
Size of active set increment. If None, no active set strategy is used.
debias : bool
Remove coefficient amplitude bias due to L1 penalty.
time_pca : bool or int
If True the rank of the concatenated epochs is reduced to
its true dimension. If is 'int' the rank is limited to this value.
weights : None | array | SourceEstimate
Weight for penalty in mixed_norm. Can be None, a
1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
with wMNE, dSPM, or fMRI).
weights_min : float
Do not consider in the estimation sources for which weights
is less than weights_min.
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization. 'prox' stands for
proximal iterations using the FISTA algorithm, 'cd' uses
coordinate descent, and 'bcd' applies block coordinate descent.
'cd' is only available for fixed orientation.
n_mxne_iter : int
The number of MxNE iterations. If > 1, iterative reweighting
is applied.
return_residual : bool
If True, the residual is returned as an Evoked instance.
return_as_dipoles : bool
If True, the sources are returned as a list of Dipole instances.
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations. Ignored if
solver is 'cd'.
%(rank_None)s
.. versionadded:: 0.18
%(pick_ori)s
sure_alpha_grid : array | str
If ``'auto'`` (default), the SURE is evaluated along 15 uniformly
distributed alphas between alpha_max and 0.1 * alpha_max. If array, the
grid is directly specified. Ignored if alpha is not "sure".
.. versionadded:: 0.24
random_state : int | None
The random state used in a random number generator for delta and
epsilon used for the SURE computation. Defaults to None.
.. versionadded:: 0.24
%(verbose)s
Returns
-------
stc : SourceEstimate | list of SourceEstimate
Source time courses for each evoked data passed as input.
residual : instance of Evoked
The residual a.k.a. data not explained by the sources.
Only returned if return_residual is True.
See Also
--------
tf_mixed_norm
References
----------
.. footbibliography::
"""
from scipy import linalg
_validate_type(alpha, ('numeric', str), 'alpha')
if isinstance(alpha, str):
_check_option('alpha', alpha, ('sure',))
elif not 0. <= alpha < 100:
raise ValueError('If not equal to "sure" alpha must be in [0, 100). '
'Got alpha = %s' % alpha)
if n_mxne_iter < 1:
raise ValueError('MxNE has to be computed at least 1 time. '
'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
if dgap_freq <= 0.:
raise ValueError('dgap_freq must be a positive integer.'
' Got dgap_freq = %s' % dgap_freq)
if not(isinstance(sure_alpha_grid, (np.ndarray, list)) or
sure_alpha_grid == "auto"):
raise ValueError('If not equal to "auto" sure_alpha_grid must be an '
'array. Got %s' % type(sure_alpha_grid))
if ((isinstance(sure_alpha_grid, str) and sure_alpha_grid != "auto")
and (isinstance(alpha, str) and alpha != "sure")):
raise Exception('If sure_alpha_grid is manually specified, alpha must '
'be "sure". Got %s' % alpha)
pca = True
if not isinstance(evoked, list):
evoked = [evoked]
_check_reference(evoked[0])
all_ch_names = evoked[0].ch_names
if not all(all_ch_names == evoked[i].ch_names
for i in range(1, len(evoked))):
raise Exception('All the datasets must have the same good channels.')
forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked[0].info, noise_cov, pca, depth, loose, rank,
weights, weights_min)
_check_ori(pick_ori, forward)
sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
M = np.concatenate([e.data[sel] for e in evoked], axis=1)
# Whiten data
logger.info('Whitening data matrix.')
M = np.dot(whitener, M)
if time_pca:
U, s, Vh = linalg.svd(M, full_matrices=False)
if not isinstance(time_pca, bool) and isinstance(time_pca, int):
U = U[:, :time_pca]
s = s[:time_pca]
Vh = Vh[:time_pca]
M = U * s
# Scaling to make setting of tol and alpha easy
tol *= sum_squared(M)
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
alpha_max *= 0.01
gain /= alpha_max
source_weighting /= alpha_max
# Alpha selected automatically by SURE minimization
if alpha == "sure":
alpha_grid = sure_alpha_grid
if isinstance(sure_alpha_grid, str) and sure_alpha_grid == "auto":
alpha_grid = np.geomspace(100, 10, num=15)
X, active_set, best_alpha_ = _compute_mxne_sure(
M, gain, alpha_grid, sigma=1, random_state=random_state,
n_mxne_iter=n_mxne_iter, maxit=maxit, tol=tol,
n_orient=n_dip_per_pos, active_set_size=active_set_size,
debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
logger.info('Selected alpha: %s' % best_alpha_)
else:
if n_mxne_iter == 1:
X, active_set, E = mixed_norm_solver(
M, gain, alpha, maxit=maxit, tol=tol,
active_set_size=active_set_size, n_orient=n_dip_per_pos,
debias=debias, solver=solver, dgap_freq=dgap_freq,
verbose=verbose)
else:
X, active_set, E = iterative_mixed_norm_solver(
M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
n_orient=n_dip_per_pos, active_set_size=active_set_size,
debias=debias, solver=solver, dgap_freq=dgap_freq,
verbose=verbose)
if time_pca:
X = np.dot(X, Vh)
M = np.dot(M, Vh)
gain_active = gain[:, active_set]
if mask is not None:
active_set_tmp = np.zeros(len(mask), dtype=bool)
active_set_tmp[mask] = active_set
active_set = active_set_tmp
del active_set_tmp
if active_set.sum() == 0:
warn("No active dipoles found. alpha is too big.")
M_estimate = np.zeros_like(M)
else:
# Reapply weights to have correct unit
X = _reapply_source_weighting(X, source_weighting, active_set)
source_weighting[source_weighting == 0] = 1 # zeros
gain_active /= source_weighting[active_set]
del source_weighting
M_estimate = np.dot(gain_active, X)
outs = list()
residual = list()
cnt = 0
for e in evoked:
tmin = e.times[0]
tstep = 1.0 / e.info['sfreq']
Xe = X[:, cnt:(cnt + len(e.times))]
if return_as_dipoles:
out = _make_dipoles_sparse(
Xe, active_set, forward, tmin, tstep,
M[:, cnt:(cnt + len(e.times))],
gain_active)
else:
out = _make_sparse_stc(
Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori)
outs.append(out)
cnt += len(e.times)
if return_residual:
residual.append(_compute_residual(forward, e, Xe, active_set,
gain_info))
_log_exp_var(M, M_estimate, prefix='')
logger.info('[done]')
if len(outs) == 1:
out = outs[0]
if return_residual:
residual = residual[0]
else:
out = outs
if return_residual:
out = out, residual
return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)."""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = evoked.copy()
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = np.hanning(lsize * 2)[:lsize]
rhann = np.hanning(rsize * 2)[-rsize:]
window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann]
evoked.data *= window[None, :]
return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
loose='auto', depth=0.8, maxit=3000,
tol=1e-4, weights=None, weights_min=0., pca=True,
debias=True, wsize=64, tstep=4, window=0.02,
return_residual=False, return_as_dipoles=False, alpha=None,
l1_ratio=None, dgap_freq=10, rank=None, pick_ori=None,
n_tfmxne_iter=1, verbose=None):
"""Time-Frequency Mixed-norm estimate (TF-MxNE).
Compute L1/L2 + L1 mixed-norm solution on time-frequency
dictionary. Works with evoked data
:footcite:`GramfortEtAl2013b,GramfortEtAl2011`.
Parameters
----------
evoked : instance of Evoked
Evoked data to invert.
forward : dict
Forward operator.
noise_cov : instance of Covariance
Noise covariance to compute whitener.
%(loose)s
%(depth)s
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter.
weights : None | array | SourceEstimate
Weight for penalty in mixed_norm. Can be None or
1d array of length n_sources or a SourceEstimate e.g. obtained
with wMNE or dSPM or fMRI.
weights_min : float
Do not consider in the estimation sources for which weights
is less than weights_min.
pca : bool
If True the rank of the data is reduced to true dimension.
debias : bool
Remove coefficient amplitude bias due to L1 penalty.
wsize : int or array-like
Length of the STFT window in samples (must be a multiple of 4).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep) and each entry of wsize must be a multiple
of 4. See :footcite:`BekhtiEtAl2016`.
tstep : int or array-like
Step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep), and each entry of tstep must be a multiple
of 2 and divide the corresponding entry of wsize. See
:footcite:`BekhtiEtAl2016`.
window : float or (float, float)
Length of time window used to take care of edge artifacts in seconds.
It can be one float or float if the values are different for left
and right window length.
return_residual : bool
If True, the residual is returned as an Evoked instance.
return_as_dipoles : bool
If True, the sources are returned as a list of Dipole instances.
alpha : float in [0, 100) or None
Overall regularization parameter.
If alpha and l1_ratio are not None, alpha_space and alpha_time are
overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
* l1_ratio. 0 means no regularization, 100 would give 0 active dipole.
l1_ratio : float in [0, 1] or None
Proportion of temporal regularization.
If l1_ratio and alpha are not None, alpha_space and alpha_time are
overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
* l1_ratio. 0 means no time regularization a.k.a. MxNE.
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations.
%(rank_None)s
.. versionadded:: 0.18
%(pick_ori)s
n_tfmxne_iter : int
Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
Source time courses.
residual : instance of Evoked
The residual a.k.a. data not explained by the sources.
Only returned if return_residual is True.
See Also
--------
mixed_norm
References
----------
.. footbibliography::
"""
_check_reference(evoked)
all_ch_names = evoked.ch_names
info = evoked.info
if not (0. <= alpha < 100.):
raise ValueError('alpha must be in [0, 100). '
'Got alpha = %s' % alpha)
if not (0. <= l1_ratio <= 1.):
raise ValueError('l1_ratio must be in range [0, 1].'
' Got l1_ratio = %s' % l1_ratio)
alpha_space = alpha * (1. - l1_ratio)
alpha_time = alpha * l1_ratio
if n_tfmxne_iter < 1:
raise ValueError('TF-MxNE has to be computed at least 1 time. '
'Requires n_tfmxne_iter >= 1, got %s' % n_tfmxne_iter)
if dgap_freq <= 0.:
raise ValueError('dgap_freq must be a positive integer.'
' Got dgap_freq = %s' % dgap_freq)
tstep = np.atleast_1d(tstep)
wsize = np.atleast_1d(wsize)
if len(tstep) != len(wsize):
raise ValueError('The same number of window sizes and steps must be '
'passed. Got tstep = %s and wsize = %s' %
(tstep, wsize))
forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked.info, noise_cov, pca, depth, loose, rank,
weights, weights_min)
_check_ori(pick_ori, forward)
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if window is not None:
evoked = _window_evoked(evoked, window)
sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
M = evoked.data[sel]
# Whiten data
logger.info('Whitening data matrix.')
M = np.dot(whitener, M)
n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs, evoked.data.shape[1])
# Scaling to make setting of tol and alpha easy
tol *= sum_squared(M)
alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
alpha_max *= 0.01
gain /= alpha_max
source_weighting /= alpha_max
if n_tfmxne_iter == 1:
X, active_set, E = tf_mixed_norm_solver(
M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
dgap_freq=dgap_freq, debias=debias)
else:
X, active_set, E = iterative_tf_mixed_norm_solver(
M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
n_tfmxne_iter=n_tfmxne_iter, maxit=maxit, tol=tol, verbose=verbose,
n_orient=n_dip_per_pos, dgap_freq=dgap_freq, debias=debias)
if active_set.sum() == 0:
raise Exception("No active dipoles found. "
"alpha_space/alpha_time are too big.")
# Compute estimated whitened sensor data for each dipole (dip, ch, time)
gain_active = gain[:, active_set]
if mask is not None:
active_set_tmp = np.zeros(len(mask), dtype=bool)
active_set_tmp[mask] = active_set
active_set = active_set_tmp
del active_set_tmp
X = _reapply_source_weighting(X, source_weighting, active_set)
gain_active /= source_weighting[active_set]
if return_residual:
residual = _compute_residual(
forward, evoked, X, active_set, gain_info)
if return_as_dipoles:
out = _make_dipoles_sparse(
X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
M, gain_active)
else:
out = _make_sparse_stc(
X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
pick_ori=pick_ori)
logger.info('[done]')
if return_residual:
out = out, residual
return out
@verbose
def _compute_mxne_sure(M, gain, alpha_grid, sigma, n_mxne_iter, maxit, tol,
n_orient, active_set_size, debias, solver, dgap_freq,
random_state, verbose):
"""Stein Unbiased Risk Estimator (SURE).
Implements the finite-difference Monte-Carlo approximation
of the SURE for Multi-Task LASSO.
See reference :footcite:`DeledalleEtAl2014`.
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
gain : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha_grid : array, shape (n_alphas,)
The grid of alphas used to evaluate the SURE.
sigma : float
The true or estimated noise level in the data. Usually 1 if the data
has been previously whitened using MNE whitener.
n_mxne_iter : int
The number of MxNE iterations. If > 1, iterative reweighting is
applied.
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter.
n_orient : int
The number of orientation (1 : fixed or 3 : free or loose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations.
random_state : int | None
The random state used in a random number generator for delta and
epsilon used for the SURE computation.
Returns
-------
X : array, shape (n_active, n_times)
Coefficient matrix.
active_set : array, shape (n_dipoles,)
Array of indices of non-zero coefficients.
best_alpha_ : float
Alpha that minimizes the SURE.
References
----------
.. footbibliography::
"""
def g(w):
return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
def gprime(w):
return 2. * np.repeat(g(w), n_orient).ravel()
def _run_solver(alpha, M, n_mxne_iter, as_init=None, X_init=None,
w_init=None):
if n_mxne_iter == 1:
X, active_set, _ = mixed_norm_solver(
M, gain, alpha, maxit=maxit, tol=tol,
active_set_size=active_set_size, n_orient=n_orient,
debias=debias, solver=solver, dgap_freq=dgap_freq,
active_set_init=as_init, X_init=X_init, verbose=False)
else:
X, active_set, _ = iterative_mixed_norm_solver(
M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
n_orient=n_orient, active_set_size=active_set_size,
debias=debias, solver=solver, dgap_freq=dgap_freq,
weight_init=w_init, verbose=False)
return X, active_set
def _fit_on_grid(gain, M, eps, delta):
coefs_grid_1_0 = np.zeros((len(alpha_grid), gain.shape[1], M.shape[1]))
coefs_grid_2_0 = np.zeros((len(alpha_grid), gain.shape[1], M.shape[1]))
active_sets, active_sets_eps = [], []
M_eps = M + eps * delta
# warm start - first iteration (leverages convexity)
logger.info('Warm starting...')
for j, alpha in enumerate(alpha_grid):
logger.info('alpha: %s' % alpha)
X, a_set = _run_solver(alpha, M, 1)
X_eps, a_set_eps = _run_solver(alpha, M_eps, 1)
coefs_grid_1_0[j][a_set, :] = X
coefs_grid_2_0[j][a_set_eps, :] = X_eps
active_sets.append(a_set)
active_sets_eps.append(a_set_eps)
# next iterations
if n_mxne_iter == 1:
return coefs_grid_1_0, coefs_grid_2_0, active_sets
else:
coefs_grid_1 = coefs_grid_1_0.copy()
coefs_grid_2 = coefs_grid_2_0.copy()
logger.info('Fitting SURE on grid.')
for j, alpha in enumerate(alpha_grid):
logger.info('alpha: %s' % alpha)
if active_sets[j].sum() > 0:
w = gprime(coefs_grid_1[j])
X, a_set = _run_solver(alpha, M, n_mxne_iter - 1,
w_init=w)
coefs_grid_1[j][a_set, :] = X
active_sets[j] = a_set
if active_sets_eps[j].sum() > 0:
w_eps = gprime(coefs_grid_2[j])
X_eps, a_set_eps = _run_solver(alpha, M_eps,
n_mxne_iter - 1,
w_init=w_eps)
coefs_grid_2[j][a_set_eps, :] = X_eps
active_sets_eps[j] = a_set_eps
return coefs_grid_1, coefs_grid_2, active_sets
def _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps):
n_sensors, n_times = gain.shape[0], M.shape[1]
dof = (gain @ (coef2 - coef1) * delta).sum() / eps
df_term = np.linalg.norm(M - gain @ coef1) ** 2
sure = df_term - n_sensors * n_times * sigma ** 2
sure += 2 * dof * sigma ** 2
return sure
sure_path = np.empty(len(alpha_grid))
rng = check_random_state(random_state)
# See Deledalle et al. 20214 Sec. 5.1
eps = 2 * sigma / (M.shape[0] ** 0.3)
delta = rng.randn(*M.shape)
coefs_grid_1, coefs_grid_2, active_sets = _fit_on_grid(gain, M, eps, delta)
logger.info("Computing SURE values on grid.")
for i, (coef1, coef2) in enumerate(zip(coefs_grid_1, coefs_grid_2)):
sure_path[i] = _compute_sure_val(
coef1, coef2, gain, M, sigma, delta, eps)
if verbose:
logger.info("alpha %s :: sure %s" % (alpha_grid[i], sure_path[i]))
best_alpha_ = alpha_grid[np.argmin(sure_path)]
X = coefs_grid_1[np.argmin(sure_path)]
active_set = active_sets[np.argmin(sure_path)]
X = X[active_set, :]
return X, active_set, best_alpha_
|
|
#!/usr/bin/env python3
import time
import cv2
import numpy as np
import sys
import pickle
e = time.time()
debug = False
fileWrite = False
if fileWrite:
fWPath = "processed/" + str(time.time()) + "-processed.jpg"
displayProcessed = False
if debug:
print ("imports: " + str(format(time.time() - e, '.5f')))
start = time.time()
serialFile = "../pickle.txt"
H, S, L, R, G, B = "H", "S", "L", "R", "G", "B" # I hate typing quotes
l, u = "l", "u" # Lower & Upper
# cc = {H: {l: 16, u: 113},
# S: {l: 131, u: 255},
# L: {l: 151, u: 255},
# R: {l: 189, u: 255},
# G: {l: 193, u: 255},
# B: {l: 204, u: 255}}
cc = {H: {l: 44, u: 93},
S: {l: 25, u: 203},
L: {l: 95, u: 220},
R: {l: 0, u: 192},
G: {l: 210, u: 255},
B: {l: 177, u: 255}}
# print (cc[H][l], cc[S][l], cc[L][l])
# print (cc[H][u], cc[S][u], cc[L][u])
# print (cc[R][l], cc[G][l], cc[B][l])
# print (cc[R][u], cc[G][u], cc[B][u])
# a = threshHSL(srcImg, [cc[H][l], cc[S][l], cc[L][l]],
# [cc[H][u], cc[S][u], cc[L][u]]) # HSL thresh lower/upper
# if debug:
# print ("HSL: " + str(format(time.time() - start, '.5f')))
# start = time.time()
# b = threshRGB(srcImg, [cc[R][l], cc[G][l], cc[B][l]],
# [cc[R][u], cc[G][u], cc[B][u]]) # RGB lower/upper
# Note: System arguments should take the form of an IP address of the video
# capture feed
# srcImg = cv2.VideoCapture() # Define srcImg as image/video capture
#
# if len(sys.argv) != 2:
# print("Error: specify an URL to connect to")
# exit(0)
#
# url = sys.argv[1]
#
# srcImg.open("http://127.0.0.1:8080/stream.wmv")
# ret, frameImg = srcImg.read() # Test
# imgY, imgX, imgChannels = frameImg.shape
srcImg = cv2.imread("/home/solomon/frc/the-deal/pythonCV/RealFullField/" +
sys.argv[1] + ".jpg", 1)
if debug:
print ("Read image: " + str(format(time.time() - start, '.5f')))
start = time.time()
def percentFromResolution(srcImg, yTargetRes, xTargetRes):
imgY, imgX, imgChannels = srcImg.shape
modPercentX = float(xTargetRes) / imgX
modPercentY = float(yTargetRes) / imgY
return [modPercentY, modPercentX]
def imgScale(toScale, percentX, percentY):
scaledImg = cv2.resize(toScale, None, fx=percentX, fy=percentY,
interpolation=cv2.INTER_CUBIC) # MaybeTry INTER_AREA
return scaledImg
def threshHSL(imgSrc, lower, upper):
"""Returns binary mask of image based on HSL bounds"""
imgSrcHLS = cv2.cvtColor(imgSrc, cv2.COLOR_BGR2HLS)
npLower = np.array([lower[0], lower[2], lower[1]]) # Compesate for HLSvsHSL
npUpper = np.array([upper[0], upper[2], upper[1]])
tmp = cv2.inRange(imgSrcHLS, npLower, npUpper)
return tmp
def threshRGB(imgSrc, lower, upper):
"""Returns binary mask of image based on RGB bounds"""
imgSrcRGB = cv2.cvtColor(imgSrc, cv2.COLOR_BGR2RGB)
npLower = np.array([lower[0], lower[1], lower[2]])
npUpper = np.array([upper[0], upper[1], upper[2]])
tmp = cv2.inRange(imgSrcRGB, npLower, npUpper)
return tmp
def cvAdd(img1, img2):
"""Returns addition of 2 images"""
tmp = cv2.add(img1, img2)
return tmp
def findContours(img):
"""Finds contours in image, preferably binary image"""
_, contours, hierarchy = \
cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return contours, hierarchy
def imgUntilQ(srcImg):
cv2.imshow('e', srcImg)
while True:
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
if debug:
print ("function defs: " + str(format(time.time() - start, '.5f')))
start = time.time()
# srcImg = imgScale(srcImg, percentFromResolution(srcImg, 240, 320)[0],
# percentFromResolution(srcImg, 240, 320)[1])
multiplier = 5
srcImg = imgScale(srcImg, percentFromResolution(srcImg,
srcImg.shape[0]*multiplier,
srcImg.shape[1]*multiplier)[0],
percentFromResolution(srcImg,
srcImg.shape[0]*multiplier,
srcImg.shape[1]*multiplier)[1])
# srcImg = cv2.resize(srcImg, None, fx=.5, fy=.5, interpolation=cv2.INTER_CUBIC)
if debug:
print ("Scale: " + str(format(time.time() - start, '.5f')))
start = time.time()
srcImg = cv2.GaussianBlur(srcImg, (1, 1), 1)
if debug:
print ("Blur: " + str(format(time.time() - start, '.5f')))
start = time.time()
a = threshHSL(srcImg, [cc[H][l], cc[S][l], cc[L][l]],
[cc[H][u], cc[S][u], cc[L][u]]) # HSL thresh lower/upper
if debug:
print ("HSL: " + str(format(time.time() - start, '.5f')))
start = time.time()
b = threshRGB(srcImg, [cc[R][l], cc[G][l], cc[B][l]],
[cc[R][u], cc[G][u], cc[B][u]]) # RGB lower/upper
if debug:
print ("RGB: " + str(format(time.time() - start, '.5f')))
start = time.time()
c = cvAdd(a, b)
# imgUntilQ(c)
if debug:
print ("Add: " + str(format(time.time() - start, '.5f')))
start = time.time()
d = c
contours, hiearchy = findContours(d)
if debug:
print ("Contours: " + str(format(time.time() - start, '.5f')))
start = time.time()
tmpVar = 0
# while len(contours) > 1: # this inefficient mess finds the biggest contour
# # (I think)
# for z in range(0, len(contours)):
# try:
# if cv2.contourArea(contours[z]) <= tmpVar:
# contours.pop(z)
# except IndexError:
# break
# # print (str(tmpVar) + ": " + str(len(contours)) + ": " + str(z))
# tmpVar += 1
#
# if debug:
# print ("Found biggest: " + str(format(time.time() - start, '.5f')))
# start = time.time()
# for x in contours:
# print (cv2.contourArea(x))
# print("\n")
contoursSorted = sorted(contours,
key=lambda x: cv2.contourArea(x), reverse=True)
# print (contours[0])
# print (contoursSorted)
contours = contoursSorted[0:5]
if debug:
print ("Found biggest w/ better algorithm: " + str(format(time.time() -
start, '.5f')))
start = time.time()
# rect = cv2.minAreaRect(contours[0])
# box = cv2.cv.BoxPoints(rect)
# box = np.int0(box)
# cv2.drawContours(srcImg, [box], 0, (0, 255, 0), 2)
#
# rows, cols = srcImg.shape[:2]
# [vx, vy, x, y] = cv2.fitLine(contours[0], cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
# lefty = int((-x*vy/vx) + y)
# righty = int(((cols-x)*vy/vx)+y)
# cv2.line(srcImg, (cols-1, righty), (0, lefty), (255, 0, 0), 2)
hull = cv2.convexHull(contours[0], returnPoints=True)
if debug:
print ("Convex hull: " + str(format(time.time() - start, '.5f')))
start = time.time()
(count, _, _) = hull.shape
hull.ravel()
hull.shape = (count, 2)
tmpVar = 0
itera = 0
maxIter = 256
iii = len(cv2.approxPolyDP(hull, tmpVar, True))
while iii != 4:
if iii > 4:
tmpVar += 1
elif iii < 4:
tmpVar -= 1
itera += 1
if itera >= maxIter:
break
iii = len(cv2.approxPolyDP(hull, tmpVar, True))
approx = cv2.approxPolyDP(hull, tmpVar, True)
if debug:
print ("Found quadrangle: " + str(format(time.time() - start, '.5f')))
start = time.time()
# if debug:
cv2.drawContours(srcImg, contours, -1, (0, 0, 255), 1)
cv2.polylines(srcImg, np.int32([hull]), True, (0, 255, 0), 1)
cv2.drawContours(srcImg, approx, -1, (0, 255, 0), 3)
for x in range(0, len(approx)):
# print (x)
# print (approx[x][0][0])
cv2.putText(srcImg,
" " + str(x) + ": (" + str(approx[x][0][0]) +
", " + str(approx[x][0][1]) + ")",
(approx[x][0][0], approx[x][0][1]),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)
if debug:
print ("Drew image: " + str(format(time.time() - start, '.5f')))
start = time.time()
if debug:
print ("Wrote image: " + str(format(time.time() - start, '.5f')))
start = time.time()
if fileWrite:
cv2.imwrite(fWPath, srcImg)
# Starting to calculate stuff for NT publishing.
# Items to be published:
# Center of box/contour (maybe avg them)
# 4 points
# Slopes of angles of sides of box
# Box height
# Box width
# Planned output:
# [center, (p1, p2, p3, p4), (Mp1, Mp2, Mp3, Mp4), (height, width)]
p1, p2, p3, p4 = [approx[0][0][0], approx[0][0][1]], \
[approx[1][0][0], approx[1][0][1]], \
[approx[2][0][0], approx[2][0][1]], \
[approx[3][0][0], approx[3][0][1]]
xSize = 0
ySize = 0
pointArr = [p1, p2, p3, p4]
leftPoints = sorted(pointArr)[:2]
rightPoints = sorted(pointArr)[2:]
topPoints = sorted(sorted(pointArr, key=lambda x: x[1])[:2])
bottomPoints = sorted(sorted(pointArr, key=lambda x: x[1])[2:])
xSize = sorted(pointArr)[-1][0] - sorted(pointArr)[0][0]
ySize = sorted(pointArr, key=lambda x: x[1], reverse=True)[0][1] - \
sorted(pointArr, key=lambda x: x[1])[0][1]
approxMoments = cv2.moments(approx)
contourMoments = cv2.moments(contours[0])
approxCentroidY = int(approxMoments['m01']/approxMoments['m00'])
approxCentroidX = int(approxMoments['m10']/approxMoments['m00'])
cv2.circle(srcImg, (approxCentroidX, approxCentroidY), 5, (255, 0, 255))
# print (p1, p2, p3, p4)
leftSlope, rightSlope, topSlope, bottomSlope = \
format((leftPoints[1][1] - leftPoints[0][1]) /
float(leftPoints[1][0] - leftPoints[0][0]), '.2f'),\
format((rightPoints[1][1] - rightPoints[0][1]) /
float(rightPoints[1][0] - rightPoints[0][0]), '.2f'),\
format((topPoints[1][1] - topPoints[0][1]) /
float(topPoints[1][0] - topPoints[0][0]), '.2f'),\
format((bottomPoints[1][1] - bottomPoints[0][1]) /
float(bottomPoints[1][0] - bottomPoints[0][0]), '.2f')
# print (leftPoints[1][1], leftPoints[0][1])
# print (leftPoints[1][0], leftPoints[0][0])
# print (leftSlope, rightSlope, topSlope, bottomSlope)
finalDict = {}
finalDict["approxCentroidX"] = int(approxCentroidX)
finalDict["approxCentroidY"] = int(approxCentroidY)
finalDict["xSize"] = int(xSize)
finalDict["ySize"] = int(ySize)
finalDict["p1"] = (int(p1[0]), int(p1[1]))
finalDict["p2"] = (int(p2[0]), int(p2[1]))
finalDict["p3"] = (int(p3[0]), int(p3[1]))
finalDict["p4"] = (int(p4[0]), int(p4[1]))
finalDict["leftSlope"] = float(leftSlope)
finalDict["rightSlope"] = float(rightSlope)
finalDict["topSlope"] = float(topSlope)
finalDict["bottomSlope"] = float(bottomSlope)
# print (str(leftSlope) + ", " + str(rightSlope) + ", " + str(topSlope) + ", " +
# str(bottomSlope))
# Side slopes
if debug:
print ("Made dict: " + str(format(time.time() - start, '.5f')))
start = time.time()
with open(serialFile, 'wb') as j:
# pickle.dump(finalList, j)
pickle.dump(finalDict, j, 2)
if debug:
print ("Dumped pickle: " + str(format(time.time() - start, '.5f')))
start = time.time()
print ("Total time: " + str(time.time() - e))
if displayProcessed:
imgUntilQ(srcImg)
print (finalDict)
|
|
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions."""
import re
from typing import Optional, Sequence, Tuple
import chex
import einops
import haiku as hk
import jax
import jax.numpy as jnp
import optax
def get_cosine_schedule(
max_learning_rate: float,
total_steps: int,
warmup_steps: int = 0) -> optax.Schedule:
"""Builds a cosine decay schedule with initial warm-up."""
if total_steps < warmup_steps:
return optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps)
return optax.join_schedules([
optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps),
optax.cosine_decay_schedule(init_value=max_learning_rate,
decay_steps=total_steps - warmup_steps),
], [warmup_steps])
def get_step_schedule(
max_learning_rate: float,
total_steps: int,
warmup_steps: int = 0) -> optax.Schedule:
"""Builds a step schedule with initial warm-up."""
if total_steps < warmup_steps:
return optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps)
return optax.join_schedules([
optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps),
optax.piecewise_constant_schedule(
init_value=max_learning_rate,
boundaries_and_scales={total_steps * 2 // 3: .1}),
], [warmup_steps])
def sgd_momentum(learning_rate_fn: optax.Schedule,
momentum: float = 0.,
nesterov: bool = False) -> optax.GradientTransformation:
return optax.chain(
optax.trace(decay=momentum, nesterov=nesterov),
optax.scale_by_schedule(learning_rate_fn),
optax.scale(-1.))
def cross_entropy(logits: chex.Array, labels: chex.Array) -> chex.Array:
return -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
def kl_divergence(q_logits: chex.Array,
p_logits: chex.Array) -> chex.Array:
"""Compute the KL divergence."""
p_probs = jax.nn.softmax(p_logits)
return cross_entropy(q_logits, p_probs) - cross_entropy(p_logits, p_probs)
def accuracy(logits: chex.Array, labels: chex.Array) -> chex.Array:
predicted_label = jnp.argmax(logits, axis=-1)
correct = jnp.equal(predicted_label, labels).astype(jnp.float32)
return jnp.sum(correct, axis=0) / logits.shape[0]
def weight_decay(params: hk.Params,
regex_match: Optional[Sequence[str]] = None,
regex_ignore: Optional[Sequence[str]] = None) -> chex.Array:
"""Computes the L2 regularization loss."""
if regex_match is None:
regex_match = ('.*w$', '.*b$')
if regex_ignore is None:
regex_ignore = ('.*batchnorm.*',)
l2_norm = 0.
for mod_name, mod_params in params.items():
for param_name, param in mod_params.items():
name = '/'.join([mod_name, param_name])
if (regex_match and
all(not re.match(regex, name) for regex in regex_match)):
continue
if (regex_ignore and
any(re.match(regex, name) for regex in regex_ignore)):
continue
l2_norm += jnp.sum(jnp.square(param))
return .5 * l2_norm
def ema_update(step: chex.Array,
avg_params: chex.ArrayTree,
new_params: chex.ArrayTree,
decay_rate: float = 0.99,
warmup_steps: int = 0,
dynamic_decay: bool = True) -> chex.ArrayTree:
"""Applies an exponential moving average."""
factor = (step >= warmup_steps).astype(jnp.float32)
if dynamic_decay:
# Uses TF-style EMA.
delta = step - warmup_steps
decay = jnp.minimum(decay_rate, (1. + delta) / (10. + delta))
else:
decay = decay_rate
decay *= factor
def _weighted_average(p1, p2):
d = decay.astype(p1.dtype)
return (1 - d) * p1 + d * p2
return jax.tree_multimap(_weighted_average, new_params, avg_params)
def cutmix(rng: chex.PRNGKey,
images: chex.Array,
labels: chex.Array,
alpha: float = 1.,
beta: float = 1.,
split: int = 1) -> Tuple[chex.Array, chex.Array]:
"""Composing two images by inserting a patch into another image."""
batch_size, height, width, _ = images.shape
split_batch_size = batch_size // split if split > 1 else batch_size
# Masking bounding box.
box_rng, lam_rng, rng = jax.random.split(rng, num=3)
lam = jax.random.beta(lam_rng, a=alpha, b=beta, shape=())
cut_rat = jnp.sqrt(1. - lam)
cut_w = jnp.array(width * cut_rat, dtype=jnp.int32)
cut_h = jnp.array(height * cut_rat, dtype=jnp.int32)
box_coords = _random_box(box_rng, height, width, cut_h, cut_w)
# Adjust lambda.
lam = 1. - (box_coords[2] * box_coords[3] / (height * width))
idx = jax.random.permutation(rng, split_batch_size)
def _cutmix(x, y):
images_a = x
images_b = x[idx, :, :, :]
y = lam * y + (1. - lam) * y[idx, :]
x = _compose_two_images(images_a, images_b, box_coords)
return x, y
if split <= 1:
return _cutmix(images, labels)
# Apply CutMix separately on each sub-batch. This reverses the effect of
# `repeat` in datasets.
images = einops.rearrange(images, '(b1 b2) ... -> b1 b2 ...', b2=split)
labels = einops.rearrange(labels, '(b1 b2) ... -> b1 b2 ...', b2=split)
images, labels = jax.vmap(_cutmix, in_axes=1, out_axes=1)(images, labels)
images = einops.rearrange(images, 'b1 b2 ... -> (b1 b2) ...', b2=split)
labels = einops.rearrange(labels, 'b1 b2 ... -> (b1 b2) ...', b2=split)
return images, labels
def _random_box(rng: chex.PRNGKey,
height: chex.Numeric,
width: chex.Numeric,
cut_h: chex.Array,
cut_w: chex.Array) -> chex.Array:
"""Sample a random box of shape [cut_h, cut_w]."""
height_rng, width_rng = jax.random.split(rng)
i = jax.random.randint(
height_rng, shape=(), minval=0, maxval=height, dtype=jnp.int32)
j = jax.random.randint(
width_rng, shape=(), minval=0, maxval=width, dtype=jnp.int32)
bby1 = jnp.clip(i - cut_h // 2, 0, height)
bbx1 = jnp.clip(j - cut_w // 2, 0, width)
h = jnp.clip(i + cut_h // 2, 0, height) - bby1
w = jnp.clip(j + cut_w // 2, 0, width) - bbx1
return jnp.array([bby1, bbx1, h, w])
def _compose_two_images(images: chex.Array,
image_permutation: chex.Array,
bbox: chex.Array) -> chex.Array:
"""Inserting the second minibatch into the first at the target locations."""
def _single_compose_two_images(image1, image2):
height, width, _ = image1.shape
mask = _window_mask(bbox, (height, width))
return image1 * (1. - mask) + image2 * mask
return jax.vmap(_single_compose_two_images)(images, image_permutation)
def _window_mask(destination_box: chex.Array,
size: Tuple[int, int]) -> jnp.ndarray:
"""Mask a part of the image."""
height_offset, width_offset, h, w = destination_box
h_range = jnp.reshape(jnp.arange(size[0]), [size[0], 1, 1])
w_range = jnp.reshape(jnp.arange(size[1]), [1, size[1], 1])
return jnp.logical_and(
jnp.logical_and(height_offset <= h_range,
h_range < height_offset + h),
jnp.logical_and(width_offset <= w_range,
w_range < width_offset + w)).astype(jnp.float32)
|
|
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import dot_sequences
from sfepy.mechanics.tensors import dim2sym, transform_data
import sfepy.mechanics.membranes as membranes
from sfepy.terms.terms import Term
def eval_membrane_mooney_rivlin(a1, a2, mtx_c, c33, mode):
"""
Evaluate stress or tangent stiffness of the Mooney-Rivlin membrane.
[1] Baoguo Wu, Xingwen Du and Huifeng Tan: A three-dimensional FE
nonlinear analysis of membranes, Computers & Structures 59 (1996),
no. 4, 601--605.
"""
a12 = 2.0 * a1[..., 0, 0]
a22 = 2.0 * a2[..., 0, 0]
sh = mtx_c.shape
sym = dim2sym(sh[2])
c11 = mtx_c[..., 0, 0]
c12 = mtx_c[..., 0, 1]
c22 = mtx_c[..., 1, 1]
pressure = c33 * (a12 + a22 * (c11 + c22))
if mode == 0:
out = nm.empty((sh[0], sh[1], sym, 1))
# S_11, S_22, S_12.
out[..., 0, 0] = -pressure * c22 * c33 + a12 + a22 * (c22 + c33)
out[..., 1, 0] = -pressure * c11 * c33 + a12 + a22 * (c11 + c33)
out[..., 2, 0] = +pressure * c12 * c33 - a22 * c12
else:
out = nm.empty((sh[0], sh[1], sym, sym))
dp11 = a22 * c33 - pressure * c22 * c33
dp22 = a22 * c33 - pressure * c11 * c33
dp12 = 2.0 * pressure * c12 * c33
# D_11, D_22, D_33
out[..., 0, 0] = - 2.0 * ((a22 - pressure * c22) * c22 * c33**2
+ c33 * c22 * dp11)
out[..., 1, 1] = - 2.0 * ((a22 - pressure * c11) * c11 * c33**2
+ c33 * c11 * dp22)
out[..., 2, 2] = - a22 + pressure * (c33 + 2.0 * c12**2 * c33**2) \
+ c12 * c33 * dp12
# D_21, D_31, D_32
out[..., 1, 0] = 2.0 * ((a22 - pressure * c33
- (a22 - pressure * c11) * c22 * c33**2)
- c33 * c11 * dp11)
out[..., 2, 0] = 2.0 * (-pressure * c12 * c22 * c33**2
+ c12 * c33 * dp11)
out[..., 2, 1] = 2.0 * (-pressure * c12 * c11 * c33**2
+ c12 * c33 * dp22)
out[..., 0, 1] = out[..., 1, 0]
out[..., 0, 2] = out[..., 2, 0]
out[..., 1, 2] = out[..., 2, 1]
# D_12, D_13, D_23
## out[..., 0, 1] = 2.0 * ((a22 - pressure * c33
## - (a22 - pressure * c22) * c11 * c33**2)
## - c33 * c22 * dp22)
## out[..., 0, 2] = 2.0 * (a22 - pressure * c22) * c12 * c33**2 \
## - c33 * c22 * dp12
## out[..., 1, 2] = 2.0 * (a22 - pressure * c11) * c12 * c33**2 \
## - c33 * c11 * dp12
return out
class TLMembraneTerm(Term):
r"""
Mooney-Rivlin membrane with plain stress assumption.
The membrane has a uniform initial thickness :math:`h_0` and obeys a
hyperelastic material law with strain energy by Mooney-Rivlin: :math:`\Psi
= a_1 (I_1 - 3) + a_2 (I_2 - 3)`.
:Arguments:
- material_a1 : :math:`a_1`
- material_a2 : :math:`a_2`
- material_h0 : :math:`h_0`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_membrane'
arg_types = ('material_a1', 'material_a2', 'material_h0',
'virtual', 'state')
arg_shapes = {'material_a1' : '1, 1', 'material_a2' : '1, 1',
'material_h0' : '1, 1',
'virtual' : ('D', 'state'), 'state' : 'D'}
geometries = ['3_4', '3_8']
integration = 'surface'
@staticmethod
def function(out, fun, *args):
"""
Notes
-----
`fun` is either `weak_function` or `eval_function` according to
evaluation mode.
"""
return fun(out, *args)
@staticmethod
def weak_function(out, a1, a2, h0, mtx_c, c33, mtx_b, mtx_t, bfg, geo,
fmode):
crt = eval_membrane_mooney_rivlin(a1, a2, mtx_c, c33, fmode)
if fmode == 0:
bts = dot_sequences(mtx_b, crt, 'ATB')
status = geo.integrate(out, bts * h0)
membranes.transform_asm_vectors(out, mtx_t)
else:
btd = dot_sequences(mtx_b, crt, 'ATB')
btdb = dot_sequences(btd, mtx_b)
stress = eval_membrane_mooney_rivlin(a1, a2, mtx_c, c33, 0)
kts = membranes.get_tangent_stress_matrix(stress, bfg)
mtx_k = kts + btdb
status = geo.integrate(out, mtx_k * h0)
membranes.transform_asm_matrices(out, mtx_t)
return status
@staticmethod
def eval_function(out, a1, a2, h0, mtx_c, c33, mtx_b, mtx_t, geo,
term_mode, fmode):
if term_mode == 'strain':
out_qp = membranes.get_green_strain_sym3d(mtx_c, c33)
elif term_mode == 'stress':
n_el, n_qp, dm, _ = mtx_c.shape
dim = dm + 1
sym = dim2sym(dim)
out_qp = nm.zeros((n_el, n_qp, sym, 1), dtype=mtx_c.dtype)
stress = eval_membrane_mooney_rivlin(a1, a2, mtx_c, c33, 0)
out_qp[..., 0:2, 0] = stress[..., 0:2, 0]
out_qp[..., 3, 0] = stress[..., 2, 0]
status = geo.integrate(out, out_qp, fmode)
out[:, 0, :, 0] = transform_data(out.squeeze(), mtx=mtx_t)
return status
def __init__(self, *args, **kwargs):
Term.__init__(self, *args, **kwargs)
self.mtx_t = None
self.membrane_geo = None
self.bfg = None
def get_fargs(self, a1, a2, h0, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vv, vu = virtual, state
sg, _ = self.get_mapping(vv)
sd = vv.field.surface_data[self.region.name]
if self.mtx_t is None:
aux = membranes.describe_geometry(vu.field,
self.region, self.integral)
self.mtx_t, self.membrane_geo = aux
# Transformed base function gradient w.r.t. material coordinates
# in quadrature points.
self.bfg = self.membrane_geo.bfg
mtx_t = self.mtx_t
bfg = self.bfg
geo = self.membrane_geo
# Displacements of element nodes.
vec_u = vu.get_state_in_region(self.region)
el_u = vec_u[sd.leconn]
# Transform displacements to the local coordinate system.
# u_new = T^T u
el_u_loc = dot_sequences(el_u, mtx_t, 'AB')
## print el_u_loc
mtx_c, c33, mtx_b = membranes.describe_deformation(el_u_loc, bfg)
if mode == 'weak':
fmode = diff_var is not None
return (self.weak_function,
a1, a2, h0, mtx_c, c33, mtx_b, mtx_t, bfg, geo, fmode)
else:
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
assert_(term_mode in ['strain', 'stress'])
return (self.eval_function,
a1, a2, h0, mtx_c, c33, mtx_b, mtx_t, geo, term_mode, fmode)
def get_eval_shape(self, a1, a2, h0, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
sym = dim2sym(dim)
return (n_el, 1, sym, 1), state.dtype
|
|
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import sys
import re
import shutil
import fnmatch
from collections import Counter
import traceback
import subprocess
import platform
import shlex
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = u'\\\\?\\'
class HumanReadableException(Exception):
"""An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for
debugging purposes as well.
Has at least two fields: `reason`, the underlying exception or a
string describing the problem; and `verb`, the action being
performed during the error.
If `tb` is provided, it is a string containing a traceback for the
associated exception. (Note that this is not necessary in Python 3.x
and should be removed when we make the transition.)
"""
error_kind = 'Error' # Human-readable description of error type.
def __init__(self, reason, verb, tb=None):
self.reason = reason
self.verb = verb
self.tb = tb
super(HumanReadableException, self).__init__(self.get_message())
def _gerund(self):
"""Generate a (likely) gerund form of the English verb.
"""
if ' ' in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb
gerund += 'ing'
return gerund
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, unicode):
return self.reason
elif isinstance(self.reason, basestring): # Byte string.
return self.reason.decode('utf8', 'ignore')
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror
else:
return u'"{0}"'.format(unicode(self.reason))
def get_message(self):
"""Create the human-readable description of the error, sans
introduction.
"""
raise NotImplementedError
def log(self, logger):
"""Log to the provided `logger` a human-readable message as an
error and a verbose traceback as a debug message.
"""
if self.tb:
logger.debug(self.tb)
logger.error(u'{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException):
"""An error that occurred while performing a filesystem manipulation
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""
def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super(FilesystemError, self).__init__(reason, verb, tb)
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ('move', 'copy', 'rename'):
clause = u'while {0} {1} to {2}'.format(
self._gerund(),
displayable_path(self.paths[0]),
displayable_path(self.paths[1])
)
elif self.verb in ('delete', 'write', 'create', 'read'):
clause = u'while {0} {1}'.format(
self._gerund(),
displayable_path(self.paths[0])
)
else:
clause = u'during {0} of paths {1}'.format(
self.verb, u', '.join(displayable_path(p) for p in self.paths)
)
return u'{0} {1}'.format(self._reasonstr(), clause)
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
"""
path = syspath(path, prefix=False)
path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
return bytestring_path(path)
def ancestry(path):
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry('/a/b/c')
['/', '/a', '/a/b']
The argument should *not* be the result of a call to `syspath`.
"""
out = []
last_path = None
while path:
path = os.path.dirname(path)
if path == last_path:
break
last_path = path
if path:
# don't yield ''
out.insert(0, path)
return out
def sorted_walk(path, ignore=(), logger=None):
"""Like `os.walk`, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob
pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed.
"""
# Make sure the path isn't a Unicode string.
path = bytestring_path(path)
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(path))
except OSError as exc:
if logger:
logger.warn(u'could not list directory {0}: {1}'.format(
displayable_path(path), exc.strerror
))
return
dirs = []
files = []
for base in contents:
base = bytestring_path(base)
# Skip ignored filenames.
skip = False
for pat in ignore:
if fnmatch.fnmatch(base, pat):
skip = True
break
if skip:
continue
# Add to output as either a file or a directory.
cur = os.path.join(path, base)
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower)
files.sort(key=bytes.lower)
yield (path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(path, base)
# yield from sorted_walk(...)
for res in sorted_walk(cur, ignore, logger):
yield res
def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
try:
os.mkdir(syspath(ancestor))
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc())
def fnmatch_all(names, patterns):
"""Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions.
"""
for name in names:
matches = False
for pattern in patterns:
matches = fnmatch.fnmatch(name, pattern)
if matches:
break
if not matches:
return False
return True
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Glob patterns in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
path = normpath(path)
if root is not None:
root = normpath(root)
ancestors = ancestry(path)
if root is None:
# Only remove the top directory.
ancestors = []
elif root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root) + 1:]
else:
# Remove nothing.
return
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
directory = syspath(directory)
if not os.path.exists(directory):
# Directory gone already.
continue
if fnmatch_all(os.listdir(directory), clutter):
# Directory contains only clutter (or nothing).
try:
shutil.rmtree(directory)
except OSError:
break
else:
break
def components(path):
"""Return a list of the path components in path. For instance:
>>> components('/a/b/c')
['a', 'b', 'c']
The argument should *not* be the result of a call to `syspath`.
"""
comps = []
ances = ancestry(path)
for anc in ances:
comp = os.path.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = os.path.basename(path)
if last:
comps.append(last)
return comps
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
"""
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if encoding == b'mbcs':
# On Windows, a broken encoding known to Python as "MBCS" is
# used for the filesystem. However, we only use the Unicode API
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = b'utf8'
return encoding
def bytestring_path(path):
"""Given a path, which is either a bytes or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames).
"""
# Pass through bytestrings.
if isinstance(path, bytes):
return path
# On Windows, remove the magic prefix added by `syspath`. This makes
# ``bytestring_path(syspath(X)) == X``, i.e., we can safely
# round-trip through `syspath`.
if os.path.__name__ == b'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
path = path[len(WINDOWS_MAGIC_PREFIX):]
# Try to encode with default encodings, but fall back to UTF8.
try:
return path.encode(_fsencoding())
except (UnicodeError, LookupError):
return path.encode('utf8')
def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, unicode):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return unicode(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore')
def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
# Don't do anything if we're not on windows
if os.path.__name__ != b'ntpath':
return path
if not isinstance(path, unicode):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith(u'\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = u'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path
return path
def samefile(p1, p2):
"""Safer equality for paths."""
return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
"""Remove the file. If `soft`, then no error will be raised if the
file does not exist.
"""
path = syspath(path)
if soft and not os.path.exists(path):
return
try:
os.remove(path)
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False):
"""Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no
effect if `path` is the same as `dest`. Paths are translated to
system paths before the syscall.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if not replace and os.path.exists(dest):
raise FilesystemError('file exists', 'copy', (path, dest))
try:
shutil.copyfile(path, dest)
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc())
def move(path, dest, replace=False):
"""Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if
`path` is the same as `dest`. If the paths are on different
filesystems (or the rename otherwise fails), a copy is attempted
instead, in which case metadata will *not* be preserved. Paths are
translated to system paths.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest),
traceback.format_exc())
# First, try renaming the file.
try:
os.rename(path, dest)
except OSError:
# Otherwise, copy and delete the original.
try:
shutil.copyfile(path, dest)
os.remove(path)
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc())
def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`."""
if (samefile(path, dest)):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest),
traceback.format_exc())
try:
os.symlink(path, dest)
except OSError:
raise FilesystemError('Operating system does not support symbolic '
'links.', 'link', (path, dest),
traceback.format_exc())
def unique_path(path):
"""Returns a version of ``path`` that does not exist on the
filesystem. Specifically, if ``path` itself already exists, then
something unique is appended to the path.
"""
if not os.path.exists(syspath(path)):
return path
base, ext = os.path.splitext(path)
match = re.search(br'\.(\d)+$', base)
if match:
num = int(match.group(1))
base = base[:match.start()]
else:
num = 0
while True:
num += 1
new_path = b'%s.%i%s' % (base, num, ext)
if not os.path.exists(new_path):
return new_path
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [
(re.compile(r'[\\/]'), u'_'), # / and \ -- forbidden everywhere.
(re.compile(r'^\.'), u'_'), # Leading dot (hidden files on Unix).
(re.compile(r'[\x00-\x1f]'), u''), # Control characters.
(re.compile(r'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters".
(re.compile(r'\.$'), u'_'), # Trailing dots.
(re.compile(r'\s+$'), u''), # Trailing whitespace.
]
def sanitize_path(path, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements; it must be a list of (compiled
regex, replacement string) pairs.
"""
replacements = replacements or CHAR_REPLACE
comps = components(path)
if not comps:
return ''
for i, comp in enumerate(comps):
for regex, repl in replacements:
comp = regex.sub(repl, comp)
comps[i] = comp
return os.path.join(*comps)
def truncate_path(path, length=MAX_FILENAME_LENGTH):
"""Given a bytestring path or a Unicode path fragment, truncate the
components to a legal length. In the last component, the extension
is preserved.
"""
comps = components(path)
out = [c[:length] for c in comps]
base, ext = os.path.splitext(comps[-1])
if ext:
# Last component has an extension.
base = base[:length - len(ext)]
out[-1] = base + ext
return os.path.join(*out)
def _legalize_stage(path, replacements, length, extension, fragment):
"""Perform a single round of path legalization steps
(sanitation/replacement, encoding from Unicode to bytes,
extension-appending, and truncation). Return the path (Unicode if
`fragment` is set, `bytes` otherwise) and whether truncation was
required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Encode for the filesystem.
if not fragment:
path = bytestring_path(path)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path, length)
return path, path != pre_truncate_path
def legalize_path(path, replacements, length, extension, fragment):
"""Given a path-like Unicode string, produce a legal path. Return
the path and a flag indicating whether some replacements had to be
ignored (see below).
The legalization process (see `_legalize_stage`) consists of
applying the sanitation rules in `replacements`, encoding the string
to bytes (unless `fragment` is set), truncating components to
`length`, appending the `extension`.
This function performs up to three calls to `_legalize_stage` in
case truncation conflicts with replacements (as can happen when
truncation creates whitespace at the end of the string, for
example). The limited number of iterations iterations avoids the
possibility of an infinite loop of sanitation and truncation
operations, which could be caused by replacement rules that make the
string longer. The flag returned from this function indicates that
the path has to be truncated twice (indicating that replacements
made the string longer again after it was truncated); the
application should probably log some sort of warning.
"""
if fragment:
# Outputting Unicode.
extension = extension.decode('utf8', 'ignore')
first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment
)
# Convert back to Unicode with extension removed.
first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
# Re-sanitize following truncation (including user replacements).
second_stage_path, retruncated = _legalize_stage(
first_stage_path, replacements, length, extension, fragment
)
# If the path was once again truncated, discard user replacements
# and run through one last legalization stage.
if retruncated:
second_stage_path, _ = _legalize_stage(
first_stage_path, None, length, extension, fragment
)
return second_stage_path, retruncated
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
return value.lower() in ('yes', '1', 'true', 't', 'y')
def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if value is None:
return u''
elif isinstance(value, buffer):
return bytes(value).decode('utf8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf8', 'ignore')
else:
return unicode(value)
def plurality(objs):
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
sequence must contain at least one object.
"""
c = Counter(objs)
if not c:
raise ValueError('sequence must be non-empty')
return c.most_common(1)[0]
def cpu_count():
"""Return the number of hardware thread contexts (cores or SMT
threads) in the system.
"""
# Adapted from the soundconverter project:
# https://github.com/kassoulet/soundconverter
if sys.platform == b'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif sys.platform == b'darwin':
try:
num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu']))
except (ValueError, OSError, subprocess.CalledProcessError):
num = 0
else:
try:
num = os.sysconf(b'SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
return 1
def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.
``cmd`` is a list of byte string arguments starting with the command names.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
"""
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=platform.system() != b'Windows',
shell=shell
)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=b' '.join(cmd),
)
return stdout
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
"""Attempt to determine the maximum filename length for the
filesystem containing `path`. If the value is greater than `limit`,
then `limit` is used instead (to prevent errors when a filesystem
misreports its capacity). If it cannot be determined (e.g., on
Windows), return `limit`.
"""
if hasattr(os, b'statvfs'):
try:
res = os.statvfs(path)
except OSError:
return limit
return min(res[9], limit)
else:
return limit
def open_anything():
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == 'Darwin':
base_cmd = 'open'
elif sys_name == 'Windows':
base_cmd = 'start'
else: # Assume Unix
base_cmd = 'xdg-open'
return base_cmd
def interactive_open(targets, command=None):
"""Open the files in `targets` by `exec`ing a new command. (The new
program takes over, and Python execution ends: this does not fork a
subprocess.)
If `command` is provided, use it. Otherwise, use an OS-specific
command (from `open_anything`) to open the file.
Can raise `OSError`.
"""
if command:
command = command.encode('utf8')
try:
command = [c.decode('utf8')
for c in shlex.split(command)]
except ValueError: # Malformed shell tokens.
command = [command]
command.insert(0, command[0]) # for argv[0]
else:
base_cmd = open_anything()
command = [base_cmd, base_cmd]
command += targets
return os.execlp(*command)
|
|
"""
This module contains util functions may be shared by both BioThings data-hub and web components.
In general, do not include utils depending on any third-party modules.
"""
import math, statistics
import time, re
import logging
from pprint import pprint, pformat
import copy
from .common import timesofar, is_scalar, is_float, is_str, is_int
def sumiflist(val):
if type(val) == list:
return sum(val)
else:
return val
def maxminiflist(val,func):
if type(val) == list:
return func(val)
else:
return val
def flatten_stats(stats):
# after merge_struct, stats can be merged together as list (merge_struct
# is only about data structures). Re-adjust here considering there could lists
# that need to be sum'ed and min/max to be dealt with
stats["_count"] = sumiflist(stats["_count"])
stats["_sum"] = sumiflist(stats["_sum"])
stats["_max"] = maxminiflist(stats["_max"],max)
stats["_min"] = maxminiflist(stats["_min"],min)
return stats
def merge_stats(target_stats, tomerge_stats):
target_stats = flatten_stats(target_stats)
tomerge_stats = flatten_stats(tomerge_stats)
# sum the counts and the sums
target_stats["_count"] = target_stats["_count"] + tomerge_stats["_count"]
target_stats["_sum"] = target_stats["_sum"] + tomerge_stats["_sum"]
# adjust min and max
if tomerge_stats["_max"] > target_stats["_max"]:
target_stats["_max"] = tomerge_stats["_max"]
if tomerge_stats["_min"] < target_stats["_min"]:
target_stats["_min"] = tomerge_stats["_min"]
# extend values
target_stats.get("__vals",[]).extend(tomerge_stats.get("__vals",[]))
def merge_record(target,tomerge,mode):
for k in tomerge:
if k in target:
if k == "_stats":
tgt_stats = target["_stats"]
tom_stats = tomerge["_stats"]
merge_stats(tgt_stats,tom_stats)
continue
for typ in tomerge[k]:
if mode == "type":
# we can safely update and possibly overwrite
# target with tomerge's values and in mode "type"
# there's no actual information for scalar fields
# (eg a string field will be like: {"myfield" : {str:{}}}
target[k].update(tomerge[k])
elif mode == "mapping":
# keep track on splitable (precedence: splitable > non-splitable)
# so don't merge if target has a "split" and tomerge has not,
# as we would loose that information
if "split" in tomerge[k][typ]:
target[k].update(tomerge[k])
else:
if typ in target[k]:
# same key, same type, need to merge stats
if not "_stats" in tomerge[k][typ]:
# we try to merge record at a too higher level, need to merge deeper
target[k] = merge_record(target[k],tomerge[k],mode)
continue
tgt_stats = target[k][typ]["_stats"]
tom_stats = tomerge[k][typ]["_stats"]
merge_stats(tgt_stats,tom_stats)
else:
# key exists but with a different type, create new type
if mode == "type":
target[k].update(tomerge[k])
else:
target[k].setdefault(typ,{}).update(tomerge[k][typ])
else:
# key doesn't exist, create key
if mode == "type":
target.setdefault(k,{}).update(tomerge[k])
else:
target.setdefault(k,{}).update(tomerge[k])
return target
def inspect(struct,key=None,mapt=None,mode="type",level=0,logger=logging):
"""
Explore struct and report types contained in it.
- struct: is the data structure to explore
- mapt: if not None, will complete that type map with passed struct. This is usefull
when iterating over a dataset of similar data, trying to find a good type summary
contained in that dataset.
- (level: is for internal purposes, mostly debugging)
- mode: see inspect_docs() documentation
"""
stats_tpl = {"_stats" : {"_min":math.inf,"_max":-math.inf,"_count":0,"_sum":0,"__vals":[]}}
def report(val,drep):
drep["_stats"] = flatten_stats(drep["_stats"])
drep["_stats"]["_count"] += 1
drep["_stats"]["_sum"] += val
if val < drep["_stats"]["_min"]:
drep["_stats"]["_min"] = val
if val > drep["_stats"]["_max"]:
drep["_stats"]["_max"] = val
if mode== "deepstats":
# just keep track of vals for now, stats are computed at the end
drep["_stats"]["__vals"].append(val)
# init recording structure if none were passed
if mapt is None:
mapt = {}
if type(struct) == dict:
# was this struct already explored before ? was it a list for that previous doc ?
# then we have to pretend here it's also a list even if not, because we want to
# report the list structure
for k in struct:
if mapt and list in mapt:# and key == k:
already_explored_as_list = True
else:
already_explored_as_list = False
if False:#already_explored_as_list:
mapt[list].setdefault(k,{})
typ = inspect(struct[k],key=k,mapt=mapt[list][k],mode=mode,level=level+1)
mapt[list].update({k:typ})
else:
mapt.setdefault(k,{})
typ = inspect(struct[k],key=k,mapt=mapt[k],mode=mode,level=level+1)
if "stats" in mode:
mapt.setdefault("_stats",copy.deepcopy(stats_tpl["_stats"]))
report(1,mapt)
elif type(struct) == list:
mapl = {}
for e in struct:
typ = inspect(e,key=key,mapt=mapl,mode=mode,level=level+1)
mapl.update(typ)
if "stats" in mode:
# here we report the number of elements in the list
mapl.update(copy.deepcopy(stats_tpl))
report(len(struct),mapl)
# and here we just report that one document had a list
mapt.setdefault("_stats",copy.deepcopy(stats_tpl["_stats"]))
report(1,mapt)
# if mapt exist, it means it's been explored previously but not as a list,
# instead of mixing dict and list types, we want to normalize so we merge the previous
# struct into that current list
if mapt and list in mapt:
mapt[list] = merge_record(mapt[list],mapl,mode)
else:
mapt.setdefault(list,{})
mapt[list].update(mapl)
elif is_scalar(struct):
typ = type(struct)
if mode == "type":
mapt[typ] = {}
elif mode == "mapping":
# splittable string ?
if is_str(struct) and len(re.split(" +",struct.strip())) > 1:
mapt[typ] = {"split":{}}
else:
mapt[typ] = {}
else:
mapt.setdefault(typ,copy.deepcopy(stats_tpl))
if is_str(struct):
report(len(struct),mapt[typ])
elif is_int(struct) or is_float(struct):
report(struct,mapt[typ])
elif type(struct) == bool:
report(struct,mapt[typ])
else:
raise TypeError("Can't analyze type %s" % type(struct))
return mapt
def merge_scalar_list(mapt,mode):
# TODO: this looks "strangely" to merge_record... refactoring needed ?
# if a list is found and other keys at same level are found in that
# list, then we need to merge. Ex: ...,{"bla":1},["bla":2],...
if "stats" in mode:
raise NotImplementedError("merging with stats is not supported (yet)")
if is_scalar(mapt):
return
if list in mapt.keys():
other_keys = [k for k in mapt if k != list]
for e in other_keys:
if e in mapt[list]:
tomerge = mapt.pop(e)
if "stats" in mode:
for typ in tomerge:
if not type(typ) == type:
continue
if not typ in mapt[list][e]:
mapt[list][e][typ] = tomerge[typ]
# Note: don't update [list]["_stats"], we keep the original stats
# that is, what's actually been inspected on the list, originally
# (and we can't really update those stats as scalar stats aren't relevant
# to a list context
elif typ == "_stats":
#merge_stats(mapt[list][e]["_stats"],tomerge["_stats"])
pass
else:
merge_stats(mapt[list][e][typ]["_stats"],tomerge[typ]["_stats"])
elif mode == "mapping":
for typ in tomerge:
if not typ in mapt[list][e]:
# that field exist in the [list] but with a different type
# just merge the typ
mapt[list][e].update(tomerge)
elif "split" in tomerge[typ]:
mapt[list][e].update(tomerge)
else:
# assuming what's in [list] is enough, we just popped the value
# from mapt, that's enough
pass
# explore further
for k in mapt[list]:
merge_scalar_list(mapt[list][k],mode)
elif type(mapt) == dict:
for k in mapt:
merge_scalar_list(mapt[k],mode)
elif type(mapt) == list:
for e in mapt:
merge_scalar_list(e,mode)
def inspect_docs(docs,mode="type",clean=True,merge=False,logger=logging):
"""Inspect docs and return a summary of its structure:
- mode:
+ "type": explore documents and report strict data structure
+ "mapping": same as type but also perform test on data so guess best mapping
(eg. check if a string is splitable, etc...). Implies merge=True
+ "stats": explore documents and compute basic stats (count,min,max,sum)
+ "deepstats": same as stats but record values and also compute mean,stdev,median
(memory intensive...)
- clean: don't delete recorded vqlues or temporary results
- merge: merge scalar into list when both exist (eg. {"val":..} and [{"val":...}]
"""
def post(mapt, mode,clean):
if type(mapt) == dict:
for k in list(mapt.keys()):
if is_str(k) and k.startswith("__"):
if k == "__vals" and mode == "deepstats":
if len(mapt["__vals"]) > 1:
mapt["_stdev"] = statistics.stdev(mapt["__vals"])
mapt["_median"] = statistics.median(mapt["__vals"])
mapt["_mean"] = statistics.mean(mapt["__vals"])
if clean:
mapt.pop(k)
else:
post(mapt[k],mode,clean)
elif type(mapt) == list:
for e in mapt:
post(e,mode,clean)
merge = mode == "mapping" and True or merge
mapt = {}
cnt = 0
t0 = time.time()
innert0 = time.time()
for doc in docs:
inspect(doc,mapt=mapt,mode=mode)
cnt += 1
if cnt % 10000 == 0:
logger.info("%d documents processed [%s]" % (cnt,timesofar(innert0)))
innert0 = time.time()
logger.info("Done [%s]" % timesofar(t0))
logger.info("Post-processing (stats)")
post(mapt,mode,clean)
if merge:
merge_scalar_list(mapt,mode)
return mapt
if __name__ == "__main__":
d1 = {"id" : "124",'lofd': [{"val":34.3},{"ul":"bla"}],"d":{"start":134,"end":5543}}
d2 = {"id" : "5",'lofd': {"oula":"mak","val":34},"d":{"start":134,"end":5543}}
d3 = {"id" : "890",'lofd': [{"val":34}],"d":{"start":134,"end":5543}}
# merge either ways in the same
m12 = inspect_docs([d1,d2])
m21 = inspect_docs([d2,d1])
#if undordered list, then:
assert m21 == m12, "\nm21=%s\n!=\nm12=%s" % (pformat(m21),pformat(m12))
# val can be an int and a float
m1 = inspect_docs([{"val":34},{"val":1.2}])
# set: types can be in any order
assert set(m1["val"]) == {int,float}
# even if val is in a list
m2 = inspect_docs([{"val":34},[{"val":1.2}]])
# list and val not merged
assert set(m2.keys()) == {'val',list}
# another example with a mix a dict and list (see "p")
od1 = {"id" : "124","d":[{"p":123},{"p":456}]}
od2 = {"id" : "124","d":[{"p":123},{"p":[456,789]}]}
m12 = inspect_docs([od1,od2],mode="type")
m21 = inspect_docs([od2,od1],mode="type")
assert m12 == m21
# "p" is a integer or a list of integer
assert m12["d"][list]["p"].keys() == {list,int}
# stats
m = {}
inspect(d1,mapt=m,mode="stats")
# some simple check
assert set(m["id"].keys()) == {str}
assert m["id"][str]["_stats"]["_count"] == 1
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 3
assert m["id"][str]["_stats"]["_sum"] == 3
assert m["lofd"].keys() == {list,"_stats"}
# "global" stats (basically record number of docs which have passed this "way")
assert m["lofd"]["_stats"]["_count"] == 1
assert m["lofd"]["_stats"]["_max"] == 1
assert m["lofd"]["_stats"]["_min"] == 1
assert m["lofd"]["_stats"]["_sum"] == 1
# list's stats
assert m["lofd"][list]["_stats"]["_count"] == 1
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
assert m["lofd"][list]["_stats"]["_sum"] == 2
# one list's elem stats
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 1
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_sum"] == 34.3
# again
inspect(d1,mapt=m,mode="stats")
assert m["id"][str]["_stats"]["_count"] == 2
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 3
assert m["id"][str]["_stats"]["_sum"] == 6
assert m["lofd"]["_stats"]["_count"] == 2
assert m["lofd"]["_stats"]["_max"] == 1
assert m["lofd"]["_stats"]["_min"] == 1
assert m["lofd"]["_stats"]["_sum"] == 2
assert m["lofd"][list]["_stats"]["_count"] == 2
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
assert m["lofd"][list]["_stats"]["_sum"] == 4
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_sum"] == 68.6
# mix with d2
inspect(d2,mapt=m,mode="stats")
assert m["id"][str]["_stats"]["_count"] == 3
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 1 # new min
assert m["id"][str]["_stats"]["_sum"] == 7
assert m["lofd"]["_stats"]["_count"] == 3
assert m["lofd"]["_stats"]["_max"] == 1
assert m["lofd"]["_stats"]["_min"] == 1
assert m["lofd"]["_stats"]["_sum"] == 3
assert m["lofd"][list]["_stats"]["_count"] == 2 # not incremented as in d2 it's not a list
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
assert m["lofd"][list]["_stats"]["_sum"] == 4
# now float & int
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_sum"] == 68.6
# val{int} wasn't merged
assert m["lofd"]["val"][int]["_stats"]["_count"] == 1
assert m["lofd"]["val"][int]["_stats"]["_max"] == 34
assert m["lofd"]["val"][int]["_stats"]["_min"] == 34
assert m["lofd"]["val"][int]["_stats"]["_sum"] == 34
# d2 again
inspect(d2,mapt=m,mode="stats")
assert m["id"][str]["_stats"]["_count"] == 4
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 1
assert m["id"][str]["_stats"]["_sum"] == 8
assert m["lofd"]["_stats"]["_count"] == 4
assert m["lofd"]["_stats"]["_max"] == 1
assert m["lofd"]["_stats"]["_min"] == 1
assert m["lofd"]["_stats"]["_sum"] == 4
assert m["lofd"][list]["_stats"]["_count"] == 2
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
assert m["lofd"][list]["_stats"]["_sum"] == 4
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_sum"] == 68.6
assert m["lofd"]["val"][int]["_stats"]["_count"] == 2
assert m["lofd"]["val"][int]["_stats"]["_max"] == 34
assert m["lofd"]["val"][int]["_stats"]["_min"] == 34
assert m["lofd"]["val"][int]["_stats"]["_sum"] == 68
# all counts should be 10
m = inspect_docs([d1] * 10,mode="stats")
assert m["d"]["end"][int]["_stats"]["_count"] == 10
assert m["d"]["start"][int]["_stats"]["_count"] == 10
assert m["id"][str]["_stats"]["_count"] == 10
assert m["lofd"]["_stats"]["_count"] == 10
assert m["lofd"][list]["_stats"]["_count"] == 10
assert m["lofd"][list]["ul"][str]["_stats"]["_count"] == 10
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 10
#### test merge_stats
###nd1 = {"id" : "124",'lofd': [{"val":34.3},{"ul":"bla"}]}
###nd2 = {"id" : "5678",'lofd': {"val":50.2}}
###m = {}
###inspect(nd1,mapt=m,mode="deepstats")
###inspect(nd2,mapt=m,mode="deepstats")
###assert set(m["lofd"].keys()) == {list,'val','_stats'}, "%s" % pformat(m)
###assert m["lofd"][list]["val"][float]["_stats"] == {'__vals': [34.3], '_count': 1, '_max': 34.3, '_min': 34.3, '_sum': 34.3}
#### merge stats into the left param
###merge_stats(m["lofd"][list]["val"][float]["_stats"],m["lofd"]["val"][float]["_stats"])
###assert m["lofd"][list]["val"][float]["_stats"] == {'__vals': [34.3, 50.2], '_count': 2, '_max': 50.2, '_min': 34.3, '_sum': 84.5}
# mapping mode (splittable strings)
# "bla" is splitable in one case, not in the other
# "oula" is splitable, "arf" is not
sd1 = {"id" : "124",'vals': [{"oula":"this is great"},{"bla":"I am splitable","arf":"ENS355432"}]}
sd2 = {"id" : "5678",'vals': {"bla":"rs45653","void":654}}
sd3 = {"id" : "124",'vals': [{"bla":"thisisanid"}]}
m = {}
inspect(sd3,mapt=m,mode="mapping")
# bla not splitable here
assert m["vals"][list]["bla"][str] == {}
inspect(sd1,mapt=m,mode="mapping")
# now it is
assert m["vals"][list]["bla"][str] == {"split":{}}
inspect(sd2,mapt=m,mode="mapping")
# not splitable in sd2
assert m["vals"]["bla"][str] == {}
# mapping with type of type
sd1 = {"_id" : "123","homologene" : {"id":"bla","gene" : [[123,456],[789,102]]}}
m = inspect_docs([sd1],mode="mapping")
import biothings.utils.es as es
mapping = es.generate_es_mapping(m)
assert mapping == {'homologene': {'properties': {'gene': {'type': 'integer'},
'id': {'analyzer': 'string_lowercase', 'type': 'string'}}}}, "mapping %s" % mapping
# ok, "bla" is either a scalar or in a list, test merge
md1 = {"id" : "124",'vals': [{"oula":"this is great"},{"bla":"rs24543","arf":"ENS355432"}]}
md2 = {"id" : "5678",'vals': {"bla":"I am splitable in a scalar","void":654}}
# bla is a different type here
md3 = {"id" : "5678",'vals': {"bla":1234}}
m = inspect_docs([md1,md2],mode="mapping") # "mapping" implies merge=True
assert not "bla" in m["vals"]
assert m["vals"][list]["bla"] == {str: {'split': {}}} # splittable str from md2 merge to list
m = inspect_docs([md1,md3],mode="mapping")
assert not "bla" in m["vals"]
assert m["vals"][list]["bla"] == {int: {}, str: {}} # keep as both types
m = inspect_docs([md1,md2,md3],mode="mapping")
assert not "bla" in m["vals"]
assert m["vals"][list]["bla"] == {int: {}, str: {'split': {}}} # splittable kept + merge int to keep both types
#### test merge scalar/list with stats
#### unmerged is a inspect-doc with mode=stats, structure is:
#### id and name keys are both as root keys and in [list]
###insdoc = {list:
### {'_stats': {'_count': 10, '_max': 200, '_sum': 1000, '_min': 2},
### 'id': {str: {'_stats': {'_count': 100, '_max': 10, '_sum': 1000, '_min': 1}}},
### 'name': {str: {'_stats': {'_count': 500, '_max': 5, '_sum': 500, '_min': 0.5}}}},
### 'id': {str: {'_stats': {'_count': 300, '_max': 30, '_sum': 300, '_min': 3}},
### int: {'_stats': {'_count': 1, '_max': 1, '_sum': 1, '_min': 1}}},
### 'name': {str: {'_stats': {'_count': 400, '_max': 40, '_sum': 4000, '_min': 4}}}}
###merge_scalar_list(insdoc,mode="stats")
#### root keys have been merged into [llist] (even id as an integer, bc it's merged based on
#### key name, not key name *and* type
###assert list(insdoc) == [list]
#### check merged stats for "id"
###assert insdoc[list]["id"][str]["_stats"]["_count"] == 400 # 300 + 100
###assert insdoc[list]["id"][str]["_stats"]["_max"] == 30 # from root key
###assert insdoc[list]["id"][str]["_stats"]["_min"] == 1 # from list key
###assert insdoc[list]["id"][str]["_stats"]["_sum"] == 1300 # 1000 + 300
#### "id" as in integer is also merged, stats are kept
###assert insdoc[list]["id"][int]["_stats"]["_count"] == 1
###assert insdoc[list]["id"][int]["_stats"]["_max"] == 1
###assert insdoc[list]["id"][int]["_stats"]["_min"] == 1
###assert insdoc[list]["id"][int]["_stats"]["_sum"] == 1
#### check merged stats for "name"
###assert insdoc[list]["name"][str]["_stats"]["_count"] == 900 # 500 + 400
###assert insdoc[list]["name"][str]["_stats"]["_max"] == 40 # from root key
###assert insdoc[list]["name"][str]["_stats"]["_min"] == 0.5 # from list key
###assert insdoc[list]["name"][str]["_stats"]["_sum"] == 4500 # 4000 + 500
#### [list] stats unchanged
###assert insdoc[list]["_stats"]["_count"] == 10
###assert insdoc[list]["_stats"]["_max"] == 200
###assert insdoc[list]["_stats"]["_min"] == 2
###assert insdoc[list]["_stats"]["_sum"] == 1000
d1 = {'go': {'BP': {'term': 'skeletal muscle fiber development', 'qualifier': 'NOT', 'pubmed': 1234, 'id': \
'GO:0048741', 'evidence': 'IBA'}}, '_id': '101362076'}
d2 = {'go': {'BP': [{'term': 'ubiquitin-dependent protein catabolic process', 'pubmed': 5678, 'id': 'GO:0006511', \
'evidence': 'IEA'}, {'term': 'protein deubiquitination', 'pubmed': [2222, 3333], 'id': 'GO:0016579', 'evidence': \
'IEA'}]}, '_id': '101241878'}
m = inspect_docs([d1,d1,d2,d2],mode="stats")
#pprint(m)
# more merge tests involving real case, deeply nested
# here, go.BP contains a list and some scalars that should be merge
# together, but also go.BP.pubmed also contains list and scalars
# needed to be merged together
insdocdeep = {'_id': {str: {}},
'go': {
'BP': {
'evidence': {str: {}},
'id': {str: {}},
'pubmed': {
list: {int: {}},
int: {}},
'qualifier': {str: {}},
'term': {str: {}},
list: {
'evidence': {str: {}},
'id': {str: {}},
'pubmed': {
list: {int: {}},
int: {}},
'qualifier': {str: {}},
'term': {str: {}}},
}
}
}
merge_scalar_list(insdocdeep,mode="type")
# we merge the first level
assert list(insdocdeep["go"]["BP"].keys()) == [list]
# and also the second one
assert list(insdocdeep["go"]["BP"][list]["pubmed"].keys()) == [list]
|
|
# regression_tests.py -- a rewrite of regression_tests.sh for Windows
# using Python 3
#
# Roger Dannenberg Jan 2017, 2020
#
# Run this in the o2/tests directory where it is found
#
# get print to be compatible even if using python 2.x:
from __future__ import print_function
import sys
import os
import platform
import subprocess
import shlex
import threading
from threading import Timer
from checkports import checkports
import time
print("Optional argument is (relative) path to tests, e.g. ../Release")
print_all_output = False
IS_OSX = False
TIMEOUT_SEC = 250
LOCALDOMAIN = "local" # but on linux, it's "localhost"
# I saw a failure of oscbndlsend+oscbndlrecv because port 8100 could
# not be bound, but I could then run by hand, so I am guessing that
# maybe it was used in a previous test and linux would not reuse it
# so quickly. So now, we wait betwen tests to see if it helps.
# 5s was not enough, but 30s seems to work. Trying 20s now.
STALL_SEC = 20 # time after run to make sure ports are free
if platform.system() == "Darwin":
STALL_SEC = 1 # I don't think we need to stall for macOS
IS_OSX = True
input("macOS tip: turn Firewall OFF to avoid orphaned ports ")
HTMLOPEN = "open "
allOK = True
# Is this Windows?
EXE = ""
if os.name == 'nt':
EXE = ".exe"
HTMLOPEN = "start firefox "
# Find the binaries
if os.path.isdir(os.path.join(os.getcwd(), '../Debug')):
BIN="../Debug"
else:
BIN=".."
# In linux, there is likely to be a debug version of the
# library copied to ../Debug, but tests are built in ..
if platform.system() == 'Linux':
BIN=".."
HTMLOPEN = "xdg-open "
LOCALDOMAIN = "localhost"
if len(sys.argv) >= 2:
BIN = sys.argv[1]
print("Directory for test binaries:", BIN)
def findLineInString(line, aString):
return ('\n' + line + '\n') in aString
def kill_process(process, command):
print("\nTimeout: killing", command)
process.kill()
class runInBackground(threading.Thread):
def __init__(self, command):
self.command = command
self.output = ""
self.errout = ""
threading.Thread.__init__(self)
def run(self):
output1 = ""
output2 = ""
args = shlex.split(self.command)
args[0] = BIN + '/' + args[0] + EXE
process = subprocess.Popen(args,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
timer = Timer(TIMEOUT_SEC, kill_process,
args=[process, self.command])
try:
timer.start()
(self.output, self.errout) = process.communicate()
finally:
timer.cancel()
self.output = self.output.decode("utf-8").replace('\r\n', '\n')
self.errout = self.errout.decode("utf-8").replace('\r\n', '\n')
# runTest testname - runs testname, saving output in output.txt,
# searches output.txt for single full line containing "DONE",
# returns status=0 if DONE was found (indicating success), or
# else status=-1 (indicating failure).
def runTest(command, stall=False, quit_on_port_loss=False):
global allOK
print(command.rjust(30) + ": ", end='', flush=True)
args = shlex.split(command)
args[0] = BIN + '/' + args[0] + EXE
process = subprocess.Popen(args,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
timer = Timer(TIMEOUT_SEC, process.kill)
try:
timer.start()
(stdout, stderr) = process.communicate()
finally:
timer.cancel()
stdout = stdout.decode("utf-8").replace('\r\n', '\n')
stderr = stderr.decode("utf-8").replace('\r\n', '\n')
if stall: dostall()
portsOK, countmsg = checkports(False, False)
if findLineInString("DONE", stdout):
print("PASS", countmsg)
# to return success (True), process must not have orphaned a port,
# but I can't figure out why macOS orphans ports, and I can't get
# through a complete test run without losing at least one port,
# so we print errors, but we do not stop the testing
if ((not IS_OSX) or quit_on_port_loss) and (not portsOK):
allOK = False # halt the testing
else:
allOK = False
if (not portsOK) or (not allOK):
if not portsOK:
print("FAIL a port was not freed, now we have fewer", countmsg)
print("**** Failing output:")
print(stdout)
print("**** Failing error output:")
print(stderr)
elif print_all_output:
print("**** stdout")
print(stdout)
return allOK
def dostall():
print("stalling...", end="", flush=True)
time.sleep(STALL_SEC)
print("\b\b\b\b\b\b\b\b\b\b\b", end="")
def startDouble(prog1, prog2, url=""):
name2 = prog2 if url == "" else url
print((prog1 + '+' + name2).rjust(30) + ": ", end='', flush=True)
p1 = runInBackground(prog1)
p1.start()
p2 = runInBackground(prog2)
p2.start()
return (p1, p2)
def finishDouble(prog1, p1, out1, prog2, p2, out2, stall):
global allOK
p1.join()
p2.join()
time.sleep(1) # debugging test: is there a race to get stdout?
if stall: dostall()
portsOK, countmsg = checkports(False, False)
if findLineInString(out1, p1.output):
if findLineInString(out2, p2.output):
print("PASS", countmsg)
if (not IS_OSX) and (not portsOK):
allOK = False # halt the testing
else:
allOK = False
else:
allOK = False
if (not portsOK) or (not allOK):
print("FAIL")
print("**** Failing output from " + prog1)
print(p1.output)
print("**** Failing error output from " + prog1)
print(p1.errout)
print("**** Failing output from " + prog2)
print(p2.output)
print("**** Failing error output from " + prog2)
print(p2.errout)
elif print_all_output:
print("**** p1.output")
print(p1.output)
print("**** p2.output")
print(p2.output)
return allOK
def runDouble(prog1, out1, prog2, out2, stall=False):
global allOK
p1p2 = startDouble(prog1, prog2)
return finishDouble(prog1, p1p2[0], out1, prog2, p1p2[1], out2, stall)
def runWsTest(prog1, out1, url, out2, stall=False):
global allOK
p1p2 = startDouble(prog1, "websockhost a@", url)
os.system(HTMLOPEN + '"http://test.' + LOCALDOMAIN + ':8080/' + url + '"');
return finishDouble(prog1, p1p2[0], out1, "websockhost", p1p2[1],
out2, stall)
def runAllTests():
print("Initial discovery port status ...")
checkports(True, True)
extensions = input("Run pattern match and bundle tests? [y,n]: ")
extensions = "y" in extensions.lower()
websocketsTests = input("Run websocket tests? [y,n]: ")
websocketsTests = "y" in websocketsTests.lower()
print("Running regression tests for O2 ...")
if not runTest("stuniptest", quit_on_port_loss=True): return
if not runTest("dispatchtest"): return
if not runTest("typestest"): return
if not runTest("taptest"): return
if not runTest("coercetest"): return
if not runTest("longtest"): return
if not runTest("arraytest"): return
if not runTest("bridgeapi"): return
if not runTest("o2litemsg"): return
if websocketsTests:
if not runWsTest("o2server - 20t", "SERVER DONE",
"o2client.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("tappub", "SERVER DONE",
"tapsub.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("statusclient", "CLIENT DONE",
"statusserver.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("statusserver", "SERVER DONE",
"statusclient.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("propsend a", "DONE",
"proprecv.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("o2litehost 500t da", "CLIENT DONE",
"wsserv.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("proprecv", "DONE",
"propsend.htm", "WEBSOCKETHOST DONE"): return
if not runWsTest("tapsub", "CLIENT DONE",
"tappub.htm", "WEBSOCKETHOST DONE"): return
if extensions:
if not runTest("bundletest"): return
if not runTest("patterntest"): return
if not runTest("infotest1 o"): return
# proptest returns almost instantly; maybe it takes awhile for the port to be released
if not runTest("proptest s"): return
if not runDouble("o2litehost 500t d", "CLIENT DONE",
"o2liteserv t", "SERVER DONE"): return
if not runDouble("o2litehost 500 d", "CLIENT DONE",
"o2liteserv u", "SERVER DONE"): return
if not runDouble("statusclient", "CLIENT DONE",
"statusserver", "SERVER DONE"): return
if not runDouble("infotest2", "INFOTEST2 DONE",
"clockmirror", "CLOCKMIRROR DONE"): return
if not runDouble("clockref", "CLOCKREF DONE",
"clockmirror", "CLOCKMIRROR DONE"): return
if not runDouble("applead", "APPLEAD DONE",
"appfollow", "APPFOLLOW DONE"): return
if not runDouble("o2client", "CLIENT DONE",
"o2server", "SERVER DONE"): return
# run with TCP instead of UDP
# I should fix the command line arguments here to be more regular:
if not runDouble("o2client 1000t", "CLIENT DONE",
"o2server - 20t", "SERVER DONE"): return
if not runDouble("nonblocksend", "CLIENT DONE",
"nonblockrecv", "SERVER DONE"): return
if not runDouble("o2unblock", "CLIENT DONE",
"o2block", "SERVER DONE"): return
if not runDouble("oscsendtest u", "OSCSEND DONE",
"oscrecvtest u", "OSCRECV DONE"): return
if not runDouble("oscsendtest u", "OSCSEND DONE",
"oscanytest u", "OSCANY DONE"): return
if not runDouble("oscsendtest", "OSCSEND DONE",
"oscrecvtest", "OSCRECV DONE"): return
if not runDouble("tcpclient", "CLIENT DONE",
"tcpserver", "SERVER DONE"): return
if not runDouble("hubclient", "HUBCLIENT DONE",
"hubserver", "HUBSERVER DONE", True): return
if not runDouble("propsend", "DONE",
"proprecv", "DONE"): return
if not runDouble("tappub", "SERVER DONE",
"tapsub", "CLIENT DONE"): return
if not runDouble("unipub", "SERVER DONE",
"unisub", "CLIENT DONE"): return
if not runDouble("dropclient", "DROPCLIENT DONE",
"dropserver", "DROPSERVER DONE"): return
if not runDouble("o2client 1000t", "CLIENT DONE",
"shmemserv u", "SERVER DONE"): return
if extensions:
if not runDouble("oscbndlsend u", "OSCSEND DONE",
"oscbndlrecv u", "OSCRECV DONE", True): return
if not runDouble("oscbndlsend", "OSCSEND DONE",
"oscbndlrecv", "OSCRECV DONE", True): return
# tests for compatibility with liblo are run only if the binaries were built
# In CMake, set BUILD_TESTS_WITH_LIBLO to create the binaries
if os.path.isfile(BIN + '/' + "lo_oscrecv" + EXE):
if not runDouble("oscsendtest @u", "OSCSEND DONE",
"lo_oscrecv u", "OSCRECV DONE"): return
if not runDouble("oscsendtest @", "OSCSEND DONE",
"lo_oscrecv", "OSCRECV DONE"): return
if os.path.isfile(BIN + '/' + "lo_oscsend" + EXE):
if not runDouble("lo_oscsend u", "OSCSEND DONE",
"oscrecvtest u", "OSCRECV DONE"): return
if not runDouble("lo_oscsend", "OSCSEND DONE",
"oscrecvtest", "OSCRECV DONE"): return
if os.path.isfile(BIN + '/' + "lo_bndlsend" + EXE):
if not runDouble("lo_bndlsend u", "OSCSEND DONE",
"oscbndlrecv u", "OSCRECV DONE"): return
if not runDouble("lo_bndlsend", "OSCSEND DONE",
"oscbndlrecv", "OSCRECV DONE"): return
if os.path.isfile(BIN + '/' + "lo_bndlrecv" + EXE):
if not runDouble("oscbndlsend Mu", "OSCSEND DONE",
"lo_bndlrecv u", "OSCRECV DONE"): return
if not runDouble("oscbndlsend M", "OSCSEND DONE",
"lo_bndlrecv", "OSCRECV DONE"): return
runAllTests()
print("stall to recover ports".rjust(30) + ":", end='', flush=True)
dostall()
print()
ports_ok, countmsg = checkports(False, False)
if not ports_ok:
print("ERROR: A port was not freed by some process. " + countmsg + "\n")
elif allOK:
print("**** All O2 regression tests PASSED.")
if not allOK:
print("ERROR: Exiting regression tests because a test failed.")
print(" See above for output from the failing test(s).")
print("\nNOTE: If firewall pop-ups requested access to the network,")
print(" that *might* affect timing and cause a test to fail.")
print(" If you granted access, permission should be granted")
print(" without delay if you run the test again.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.