"city": "Placentia",
"growth_from_2000_to_2013": "11.8%",
"latitude": 33.8722371,
"longitude": -117.8703363,
"population": "52206",
"rank": "704",
"state": "California",
},
{
"city": "West New York",
"growth_from_2000_to_2013": "13.3%",
"latitude": 40.7878788,
"longitude": -74.0143064,
"population": "52122",
"rank": "705",
"state": "New Jersey",
},
{
"city": "Dublin",
"growth_from_2000_to_2013": "70.0%",
"latitude": 37.7021521,
"longitude": -121.9357918,
"population": "52105",
"rank": "706",
"state": "California",
},
{
"city": "Oak Park",
"growth_from_2000_to_2013": "-0.8%",
"latitude": 41.8850317,
"longitude": -87.7845025,
"population": "52066",
"rank": "707",
"state": "Illinois",
},
{
"city": "Peabody",
"growth_from_2000_to_2013": "7.5%",
"latitude": 42.5278731,
"longitude": -70.9286609,
"population": "52044",
"rank": "708",
"state": "Massachusetts",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "9.7%",
"latitude": 40.5067723,
"longitude": -74.2654234,
"population": "51982",
"rank": "709",
"state": "New Jersey",
},
{
"city": "Battle Creek",
"growth_from_2000_to_2013": "-2.8%",
"latitude": 42.3211522,
"longitude": -85.17971419999999,
"population": "51848",
"rank": "710",
"state": "Michigan",
},
{
"city": "Bradenton",
"growth_from_2000_to_2013": "3.4%",
"latitude": 27.4989278,
"longitude": -82.5748194,
"population": "51763",
"rank": "711",
"state": "Florida",
},
{
"city": "Gilroy",
"growth_from_2000_to_2013": "23.9%",
"latitude": 37.0057816,
"longitude": -121.5682751,
"population": "51701",
"rank": "712",
"state": "California",
},
{
"city": "Milford",
"growth_from_2000_to_2013": "1.8%",
"latitude": 41.2306979,
"longitude": -73.064036,
"population": "51644",
"rank": "713",
"state": "Connecticut",
},
{
"city": "Albany",
"growth_from_2000_to_2013": "25.5%",
"latitude": 44.6365107,
"longitude": -123.1059282,
"population": "51583",
"rank": "714",
"state": "Oregon",
},
{
"city": "Ankeny",
"growth_from_2000_to_2013": "86.9%",
"latitude": 41.7317884,
"longitude": -93.6001278,
"population": "51567",
"rank": "715",
"state": "Iowa",
},
{
"city": "La Crosse",
"growth_from_2000_to_2013": "-0.8%",
"latitude": 43.8013556,
"longitude": -91.23958069999999,
"population": "51522",
"rank": "716",
"state": "Wisconsin",
},
{
"city": "Burlington",
"growth_from_2000_to_2013": "12.1%",
"latitude": 36.0956918,
"longitude": -79.43779909999999,
"population": "51510",
"rank": "717",
"state": "North Carolina",
},
{
"city": "DeSoto",
"growth_from_2000_to_2013": "36.0%",
"latitude": 32.5896998,
"longitude": -96.8570738,
"population": "51483",
"rank": "718",
"state": "Texas",
},
{
"city": "Harrisonburg",
"growth_from_2000_to_2013": "27.1%",
"latitude": 38.4495688,
"longitude": -78.8689155,
"population": "51395",
"rank": "719",
"state": "Virginia",
},
{
"city": "Minnetonka",
"growth_from_2000_to_2013": "0.4%",
"latitude": 44.9211836,
"longitude": -93.4687489,
"population": "51368",
"rank": "720",
"state": "Minnesota",
},
{
"city": "Elkhart",
"growth_from_2000_to_2013": "-2.5%",
"latitude": 41.6819935,
"longitude": -85.9766671,
"population": "51265",
"rank": "721",
"state": "Indiana",
},
{
"city": "Lakewood",
"growth_from_2000_to_2013": "-9.4%",
"latitude": 41.4819932,
"longitude": -81.7981908,
"population": "51143",
"rank": "722",
"state": "Ohio",
},
{
"city": "Glendora",
"growth_from_2000_to_2013": "3.1%",
"latitude": 34.1361187,
"longitude": -117.865339,
"population": "51074",
"rank": "723",
"state": "California",
},
{
"city": "Southaven",
"growth_from_2000_to_2013": "72.8%",
"latitude": 34.9889818,
"longitude": -90.0125913,
"population": "50997",
"rank": "724",
"state": "Mississippi",
},
{
"city": "Charleston",
"growth_from_2000_to_2013": "-4.7%",
"latitude": 38.3498195,
"longitude": -81.6326234,
"population": "50821",
"rank": "725",
"state": "West Virginia",
},
{
"city": "Joplin",
"growth_from_2000_to_2013": "11.2%",
"latitude": 37.08422710000001,
"longitude": -94.51328099999999,
"population": "50789",
"rank": "726",
"state": "Missouri",
},
{
"city": "Enid",
"growth_from_2000_to_2013": "8.1%",
"latitude": 36.3955891,
"longitude": -97.8783911,
"population": "50725",
"rank": "727",
"state": "Oklahoma",
},
{
"city": "Palm Beach Gardens",
"growth_from_2000_to_2013": "39.6%",
"latitude": 26.8233946,
"longitude": -80.13865469999999,
"population": "50699",
"rank": "728",
"state": "Florida",
},
{
"city": "Brookhaven",
"growth_from_2000_to_2013": "",
"latitude": 33.8651033,
"longitude": -84.3365917,
"population": "50603",
"rank": "729",
"state": "Georgia",
},
{
"city": "Plainfield",
"growth_from_2000_to_2013": "5.7%",
"latitude": 40.6337136,
"longitude": -74.4073736,
"population": "50588",
"rank": "730",
"state": "New Jersey",
},
{
"city": "Grand Island",
"growth_from_2000_to_2013": "16.0%",
"latitude": 40.9263957,
"longitude": -98.3420118,
"population": "50550",
"rank": "731",
"state": "Nebraska",
},
{
"city": "Palm Desert",
"growth_from_2000_to_2013": "13.2%",
"latitude": 33.7222445,
"longitude": -116.3744556,
"population": "50508",
"rank": "732",
"state": "California",
},
{
"city": "Huntersville",
"growth_from_2000_to_2013": "92.9%",
"latitude": 35.410694,
"longitude": -80.84285040000002,
"population": "50458",
"rank": "733",
"state": "North Carolina",
},
{
"city": "Tigard",
"growth_from_2000_to_2013": "17.8%",
"latitude": 45.4312294,
"longitude": -122.7714861,
"population": "50444",
"rank": "734",
"state": "Oregon",
},
{
"city": "Lenexa",
"growth_from_2000_to_2013": "24.6%",
"latitude": 38.9536174,
"longitude": -94.73357089999999,
"population": "50344",
"rank": "735",
"state": "Kansas",
},
{
"city": "Saginaw",
"growth_from_2000_to_2013": "-18.2%",
"latitude": 43.4194699,
"longitude": -83.9508068,
"population": "50303",
"rank": "736",
"state": "Michigan",
},
{
"city": "Kentwood",
"growth_from_2000_to_2013": "10.5%",
"latitude": 42.8694731,
"longitude": -85.64474919999999,
"population": "50233",
"rank": "737",
"state": "Michigan",
},
{
"city": "Doral",
"growth_from_2000_to_2013": "137.6%",
"latitude": 25.8195424,
"longitude": -80.3553302,
"population": "50213",
"rank": "738",
"state": "Florida",
},
{
"city": "Apple Valley",
"growth_from_2000_to_2013": "9.2%",
"latitude": 44.7319094,
"longitude": -93.21772000000001,
"population": "50201",
"rank": "739",
"state": "Minnesota",
},
{
"city": "Grapevine",
"growth_from_2000_to_2013": "17.6%",
"latitude": 32.9342919,
"longitude": -97.0780654,
"population": "50195",
"rank": "740",
"state": "Texas",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "25.4%",
"latitude": 33.5676842,
"longitude": -117.7256083,
"population": "50175",
"rank": "741",
"state": "California",
},
{
"city": "Sammamish",
"growth_from_2000_to_2013": "44.1%",
"latitude": 47.61626829999999,
"longitude": -122.0355736,
"population": "50169",
"rank": "742",
"state": "Washington",
},
{
"city": "Casa Grande",
"growth_from_2000_to_2013": "86.0%",
"latitude": 32.8795022,
"longitude": -111.7573521,
"population": "50111",
"rank": "743",
"state": "Arizona",
},
{
"city": "Pinellas Park",
"growth_from_2000_to_2013": "5.9%",
"latitude": 27.8428025,
"longitude": -82.6995443,
"population": "49998",
"rank": "744",
"state": "Florida",
},
{
"city": "Troy",
"growth_from_2000_to_2013": "1.5%",
"latitude": 42.7284117,
"longitude": -73.69178509999999,
"population": "49974",
"rank": "745",
"state": "New York",
},
{
"city": "West Sacramento",
"growth_from_2000_to_2013": "55.6%",
"latitude": 38.5804609,
"longitude": -121.530234,
"population": "49891",
"rank": "746",
"state": "California",
},
{
"city": "Burien",
"growth_from_2000_to_2013": "56.7%",
"latitude": 47.4703767,
"longitude": -122.3467918,
"population": "49858",
"rank": "747",
"state": "Washington",
},
{
"city": "Commerce City",
"growth_from_2000_to_2013": "135.4%",
"latitude": 39.8083196,
"longitude": -104.9338675,
"population": "49799",
"rank": "748",
"state": "Colorado",
},
{
"city": "Monroe",
"growth_from_2000_to_2013": "-6.1%",
"latitude": 32.5093109,
"longitude": -92.1193012,
"population": "49761",
"rank": "749",
"state": "Louisiana",
},
{
"city": "Cerritos",
"growth_from_2000_to_2013": "-3.6%",
"latitude": 33.8583483,
"longitude": -118.0647871,
"population": "49707",
"rank": "750",
"state": "California",
},
{
"city": "Downers Grove",
"growth_from_2000_to_2013": "0.0%",
"latitude": 41.8089191,
"longitude": -88.01117459999999,
"population": "49670",
"rank": "751",
"state": "Illinois",
},
{
"city": "Coral Gables",
"growth_from_2000_to_2013": "16.1%",
"latitude": 25.72149,
"longitude": -80.2683838,
"population": "49631",
"rank": "752",
"state": "Florida",
},
{
"city": "Wilson",
"growth_from_2000_to_2013": "10.1%",
"latitude": 35.7212689,
"longitude": -77.9155395,
"population": "49628",
"rank": "753",
"state": "North Carolina",
},
{
"city": "Niagara Falls",
"growth_from_2000_to_2013": "-10.8%",
"latitude": 43.0962143,
"longitude": -79.0377388,
"population": "49468",
"rank": "754",
"state": "New York",
},
{
"city": "Poway",
"growth_from_2000_to_2013": "2.4%",
"latitude": 32.9628232,
"longitude": -117.0358646,
"population": "49417",
"rank": "755",
"state": "California",
},
{
"city": "Edina",
"growth_from_2000_to_2013": "4.1%",
"latitude": 44.8896866,
"longitude": -93.3499489,
"population": "49376",
"rank": "756",
"state": "Minnesota",
},
{
"city": "Cuyahoga Falls",
"growth_from_2000_to_2013": "-0.2%",
"latitude": 41.1339449,
"longitude": -81.48455849999999,
"population": "49267",
"rank": "757",
"state": "Ohio",
},
{
"city": "Rancho Santa Margarita",
"growth_from_2000_to_2013": "4.6%",
"latitude": 33.640855,
"longitude": -117.603104,
"population": "49228",
"rank": "758",
"state": "California",
},
{
"city": "Harrisburg",
"growth_from_2000_to_2013": "0.6%",
"latitude": 40.2731911,
"longitude": -76.8867008,
"population": "49188",
"rank": "759",
"state": "Pennsylvania",
},
{
"city": "Huntington",
"growth_from_2000_to_2013": "-5.0%",
"latitude": 38.4192496,
"longitude": -82.44515400000002,
"population": "49177",
"rank": "760",
"state": "West Virginia",
},
{
"city": "La Mirada",
"growth_from_2000_to_2013": "4.6%",
"latitude": 33.9172357,
"longitude": -118.0120086,
"population": "49133",
"rank": "761",
"state": "California",
},
{
"city": "Cypress",
"growth_from_2000_to_2013": "5.3%",
"latitude": 33.8169599,
"longitude": -118.0372852,
"population": "49087",
"rank": "762",
"state": "California",
},
{
"city": "Caldwell",
"growth_from_2000_to_2013": "77.1%",
"latitude": 43.66293839999999,
"longitude": -116.6873596,
"population": "48957",
"rank": "763",
"state": "Idaho",
},
{
"city": "Logan",
"growth_from_2000_to_2013": "14.5%",
"latitude": 41.7369803,
"longitude": -111.8338359,
"population": "48913",
"rank": "764",
"state": "Utah",
},
{
"city": "Galveston",
"growth_from_2000_to_2013": "-15.2%",
"latitude": 29.3013479,
"longitude": -94.7976958,
"population": "48733",
"rank": "765",
"state": "Texas",
},
{
"city": "Sheboygan",
"growth_from_2000_to_2013": "-3.9%",
"latitude": 43.7508284,
"longitude": -87.71453,
"population": "48725",
"rank": "766",
"state": "Wisconsin",
},
{
"city": "Middletown",
"growth_from_2000_to_2013": "-5.7%",
"latitude": 39.5150576,
"longitude": -84.39827629999999,
"population": "48630",
"rank": "767",
"state": "Ohio",
},
{
"city": "Murray",
"growth_from_2000_to_2013": "6.6%",
"latitude": 40.6668916,
"longitude": -111.8879909,
"population": "48612",
"rank": "768",
"state": "Utah",
},
{
"city": "Roswell",
"growth_from_2000_to_2013": "7.5%",
"latitude": 33.3942655,
"longitude": -104.5230242,
"population": "48611",
"rank": "769",
"state": "New Mexico",
},
{
"city": "Parker",
"growth_from_2000_to_2013": "96.4%",
"latitude": 39.5186002,
"longitude": -104.7613633,
"population": "48608",
"rank": "770",
"state": "Colorado",
},
{
"city": "Bedford",
"growth_from_2000_to_2013": "2.9%",
"latitude": 32.844017,
"longitude": -97.1430671,
"population": "48592",
"rank": "771",
"state": "Texas",
},
{
"city": "East Lansing",
"growth_from_2000_to_2013": "4.2%",
"latitude": 42.7369792,
"longitude": -84.48386540000001,
"population": "48554",
"rank": "772",
"state": "Michigan",
},
{
"city": "Methuen",
"growth_from_2000_to_2013": "10.3%",
"latitude": 42.7262016,
"longitude": -71.1908924,
"population": "48514",
"rank": "773",
"state": "Massachusetts",
},
{
"city": "Covina",
"growth_from_2000_to_2013": "3.3%",
"latitude": 34.0900091,
"longitude": -117.8903397,
"population": "48508",
"rank": "774",
"state": "California",
},
{
"city": "Alexandria",
"growth_from_2000_to_2013": "4.1%",
"latitude": 31.3112936,
"longitude": -92.4451371,
"population": "48426",
"rank": "775",
"state": "Louisiana",
},
{
"city": "Olympia",
"growth_from_2000_to_2013": "12.1%",
"latitude": 47.0378741,
"longitude": -122.9006951,
"population": "48338",
"rank": "776",
"state": "Washington",
},
{
"city": "Euclid",
"growth_from_2000_to_2013": "-8.4%",
"latitude": 41.5931049,
"longitude": -81.5267873,
"population": "48139",
"rank": "777",
"state": "Ohio",
},
{
"city": "Mishawaka",
"growth_from_2000_to_2013": "2.0%",
"latitude": 41.6619927,
"longitude": -86.15861559999999,
"population": "47989",
"rank": "778",
"state": "Indiana",
},
{
"city": "Salina",
"growth_from_2000_to_2013": "4.5%",
"latitude": 38.8402805,
"longitude": -97.61142369999999,
"population": "47846",
"rank": "779",
"state": "Kansas",
},
{
"city": "Azusa",
"growth_from_2000_to_2013": "6.7%",
"latitude": 34.1336186,
"longitude": -117.9075627,
"population": "47842",
"rank": "780",
"state": "California",
},
{
"city": "Newark",
"growth_from_2000_to_2013": "3.1%",
"latitude": 40.0581205,
"longitude": -82.4012642,
"population": "47777",
"rank": "781",
"state": "Ohio",
},
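Each record above has the same shape: city, growth_from_2000_to_2013, latitude, longitude, population, rank and state, with population and rank stored as strings and growth occasionally empty. A minimal sketch of consuming such records, assuming the full list has been saved to a hypothetical cities.json file:

import json

# Load the list of city records (file name is hypothetical).
with open("cities.json") as fh:
    cities = json.load(fh)

# Example query: fastest-growing California cities in the data.
california = [
    c for c in cities
    if c["state"] == "California" and c["growth_from_2000_to_2013"]  # skip empty growth values
]
california.sort(key=lambda c: float(c["growth_from_2000_to_2013"].rstrip("%")), reverse=True)
for c in california[:3]:
    print(c["city"], c["growth_from_2000_to_2013"], int(c["population"]))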
(4, 33, -3, -5): (1, 1),
(4, 33, -3, -4): (-1, 1),
(4, 33, -3, -3): (0, 1),
(4, 33, -3, -2): (0, 1),
(4, 33, -3, -1): (0, 1),
(4, 33, -3, 0): (0, 1),
(4, 33, -3, 1): (0, 1),
(4, 33, -3, 2): (0, 0),
(4, 33, -3, 3): (-1, -1),
(4, 33, -3, 4): (0, 1),
(4, 33, -3, 5): (0, 1),
(4, 33, -2, -5): (0, 1),
(4, 33, -2, -4): (0, 1),
(4, 33, -2, -3): (0, 1),
(4, 33, -2, -2): (-1, 1),
(4, 33, -2, -1): (-1, 1),
(4, 33, -2, 0): (-1, 1),
(4, 33, -2, 1): (-1, 1),
(4, 33, -2, 2): (-1, 0),
(4, 33, -2, 3): (-1, -1),
(4, 33, -2, 4): (0, 1),
(4, 33, -2, 5): (0, 1),
(4, 33, -1, -5): (-1, 1),
(4, 33, -1, -4): (-1, 1),
(4, 33, -1, -3): (-1, 1),
(4, 33, -1, -2): (0, 1),
(4, 33, -1, -1): (-1, 1),
(4, 33, -1, 0): (-1, 1),
(4, 33, -1, 1): (-1, 0),
(4, 33, -1, 2): (-1, -1),
(4, 33, -1, 3): (-1, 1),
(4, 33, -1, 4): (-1, 1),
(4, 33, -1, 5): (-1, 1),
(4, 33, 0, -5): (0, 1),
(4, 33, 0, -4): (-1, 1),
(4, 33, 0, -3): (-1, 1),
(4, 33, 0, -2): (-1, 1),
(4, 33, 0, -1): (-1, 1),
(4, 33, 0, 0): (-1, 0),
(4, 33, 0, 1): (-1, -1),
(4, 33, 0, 2): (-1, -1),
(4, 33, 0, 3): (-1, -1),
(4, 33, 0, 4): (-1, 1),
(4, 33, 0, 5): (-1, 1),
(4, 33, 1, -5): (0, 1),
(4, 33, 1, -4): (0, 1),
(4, 33, 1, -3): (-1, 1),
(4, 33, 1, -2): (-1, 1),
(4, 33, 1, -1): (-1, 1),
(4, 33, 1, 0): (-1, 1),
(4, 33, 1, 1): (-1, 0),
(4, 33, 1, 2): (-1, 1),
(4, 33, 1, 3): (-1, 1),
(4, 33, 1, 4): (-1, 1),
(4, 33, 1, 5): (-1, 1),
(4, 33, 2, -5): (-1, 1),
(4, 33, 2, -4): (-1, 1),
(4, 33, 2, -3): (-1, 1),
(4, 33, 2, -2): (-1, 0),
(4, 33, 2, -1): (-1, -1),
(4, 33, 2, 0): (-1, 1),
(4, 33, 2, 1): (-1, 1),
(4, 33, 2, 2): (-1, 1),
(4, 33, 2, 3): (-1, 1),
(4, 33, 2, 4): (-1, 1),
(4, 33, 2, 5): (-1, 1),
(4, 33, 3, -5): (0, 1),
(4, 33, 3, -4): (0, 1),
(4, 33, 3, -3): (0, 1),
(4, 33, 3, -2): (0, 0),
(4, 33, 3, -1): (0, 1),
(4, 33, 3, 0): (-1, 1),
(4, 33, 3, 1): (-1, 1),
(4, 33, 3, 2): (-1, 1),
(4, 33, 3, 3): (-1, 1),
(4, 33, 3, 4): (-1, 1),
(4, 33, 3, 5): (-1, 1),
(4, 33, 4, -5): (0, 1),
(4, 33, 4, -4): (0, 1),
(4, 33, 4, -3): (0, 1),
(4, 33, 4, -2): (0, 0),
(4, 33, 4, -1): (0, 1),
(4, 33, 4, 0): (0, 1),
(4, 33, 4, 1): (0, 1),
(4, 33, 4, 2): (-1, 1),
(4, 33, 4, 3): (-1, 1),
(4, 33, 4, 4): (-1, 1),
(4, 33, 4, 5): (-1, 1),
(4, 33, 5, -5): (0, 1),
(4, 33, 5, -4): (0, 1),
(4, 33, 5, -3): (0, 1),
(4, 33, 5, -2): (0, 0),
(4, 33, 5, -1): (0, 1),
(4, 33, 5, 0): (0, 1),
(4, 33, 5, 1): (0, 1),
(4, 33, 5, 2): (-1, 1),
(4, 33, 5, 3): (-1, 1),
(4, 33, 5, 4): (-1, 1),
(4, 33, 5, 5): (-1, 1),
(4, 34, -5, -5): (0, 1),
(4, 34, -5, -4): (0, 1),
(4, 34, -5, -3): (0, 1),
(4, 34, -5, -2): (0, 1),
(4, 34, -5, -1): (0, 1),
(4, 34, -5, 0): (0, 1),
(4, 34, -5, 1): (0, 0),
(4, 34, -5, 2): (-1, -1),
(4, 34, -5, 3): (0, 1),
(4, 34, -5, 4): (0, 1),
(4, 34, -5, 5): (0, 1),
(4, 34, -4, -5): (0, 1),
(4, 34, -4, -4): (0, 1),
(4, 34, -4, -3): (0, 1),
(4, 34, -4, -2): (0, 1),
(4, 34, -4, -1): (0, 1),
(4, 34, -4, 0): (0, 1),
(4, 34, -4, 1): (0, 0),
(4, 34, -4, 2): (-1, -1),
(4, 34, -4, 3): (0, 1),
(4, 34, -4, 4): (0, 1),
(4, 34, -4, 5): (0, 1),
(4, 34, -3, -5): (-1, 1),
(4, 34, -3, -4): (0, 1),
(4, 34, -3, -3): (0, 1),
(4, 34, -3, -2): (0, 1),
(4, 34, -3, -1): (0, 1),
(4, 34, -3, 0): (0, 1),
(4, 34, -3, 1): (0, 0),
(4, 34, -3, 2): (-1, -1),
(4, 34, -3, 3): (0, 1),
(4, 34, -3, 4): (0, 1),
(4, 34, -3, 5): (0, 1),
(4, 34, -2, -5): (0, 1),
(4, 34, -2, -4): (0, 1),
(4, 34, -2, -3): (-1, 1),
(4, 34, -2, -2): (-1, 1),
(4, 34, -2, -1): (-1, 1),
(4, 34, -2, 0): (-1, 1),
(4, 34, -2, 1): (-1, 0),
(4, 34, -2, 2): (-1, -1),
(4, 34, -2, 3): (0, 1),
(4, 34, -2, 4): (0, 1),
(4, 34, -2, 5): (0, 1),
(4, 34, -1, -5): (-1, 1),
(4, 34, -1, -4): (-1, 1),
(4, 34, -1, -3): (-1, 0),
(4, 34, -1, -2): (-1, 1),
(4, 34, -1, -1): (-1, 1),
(4, 34, -1, 0): (-1, 0),
(4, 34, -1, 1): (-1, -1),
(4, 34, -1, 2): (-1, 1),
(4, 34, -1, 3): (-1, 1),
(4, 34, -1, 4): (-1, 1),
(4, 34, -1, 5): (-1, 1),
(4, 34, 0, -5): (-1, 1),
(4, 34, 0, -4): (-1, 1),
(4, 34, 0, -3): (-1, 1),
(4, 34, 0, -2): (-1, 1),
(4, 34, 0, -1): (-1, 1),
(4, 34, 0, 0): (-1, 0),
(4, 34, 0, 1): (-1, -1),
(4, 34, 0, 2): (-1, -1),
(4, 34, 0, 3): (-1, 1),
(4, 34, 0, 4): (-1, 1),
(4, 34, 0, 5): (-1, 1),
(4, 34, 1, -5): (0, 1),
(4, 34, 1, -4): (-1, 1),
(4, 34, 1, -3): (-1, 0),
(4, 34, 1, -2): (-1, 1),
(4, 34, 1, -1): (-1, 1),
(4, 34, 1, 0): (-1, 1),
(4, 34, 1, 1): (-1, 1),
(4, 34, 1, 2): (-1, 1),
(4, 34, 1, 3): (-1, 1),
(4, 34, 1, 4): (-1, 1),
(4, 34, 1, 5): (-1, 1),
(4, 34, 2, -5): (-1, 1),
(4, 34, 2, -4): (-1, 1),
(4, 34, 2, -3): (-1, 0),
(4, 34, 2, -2): (-1, -1),
(4, 34, 2, -1): (-1, 1),
(4, 34, 2, 0): (-1, 1),
(4, 34, 2, 1): (-1, 1),
(4, 34, 2, 2): (-1, 1),
(4, 34, 2, 3): (-1, 1),
(4, 34, 2, 4): (-1, 1),
(4, 34, 2, 5): (-1, 1),
(4, 34, 3, -5): (0, 1),
(4, 34, 3, -4): (0, 1),
(4, 34, 3, -3): (0, 1),
(4, 34, 3, -2): (0, 1),
(4, 34, 3, -1): (0, 1),
(4, 34, 3, 0): (-1, 1),
(4, 34, 3, 1): (-1, 1),
(4, 34, 3, 2): (-1, 1),
(4, 34, 3, 3): (-1, 1),
(4, 34, 3, 4): (-1, 1),
(4, 34, 3, 5): (-1, 1),
(4, 34, 4, -5): (0, 1),
(4, 34, 4, -4): (0, 1),
(4, 34, 4, -3): (0, 1),
(4, 34, 4, -2): (0, 1),
(4, 34, 4, -1): (0, 1),
(4, 34, 4, 0): (0, 1),
(4, 34, 4, 1): (-1, 1),
(4, 34, 4, 2): (-1, 1),
(4, 34, 4, 3): (-1, 1),
(4, 34, 4, 4): (-1, 1),
(4, 34, 4, 5): (-1, 1),
(4, 34, 5, -5): (0, 1),
(4, 34, 5, -4): (0, 1),
(4, 34, 5, -3): (0, 1),
(4, 34, 5, -2): (0, 1),
(4, 34, 5,
338, 772, 629, 726, 296, 672,
282, 268, 741, 330, 272, 217, 188, 754, 875,
58],
[869, 349, 328, 585, 442, 812, 645, 854, 317,
437, 314, 343, 571, 202, 534, 22, 307, 874,
859],
[161, 579, 865, 703, 276, 889, 374, 792, 123,
668, 970, 737, 846, 416, 704, 204, 660, 223,
509],
[243, 645, 359, 427, 636, 193, 663, 857, 712,
510, 367, 862, 352, 715, 811, 986, 292, 391,
475],
[645, 767, 117, 907, 321, 906, 592, 508, 647,
289, 307, 519, 425, 659, 219, 459, 537, 505,
328],
[743, 74, 374, 226, 356, 28, 5, 215, 459, 232,
18, 123, 308, 277, 490, 345, 68, 763, 93],
[21, 927, 770, 760, 75, 751, 387, 686, 366, 108,
327, 196, 603, 676, 337, 59, 799, 41, 699],
[777, 779, 755, 647, 718, 144, 749, 35, 282,
233, 552, 936, 391, 140, 877, 874, 472, 86,
836],
[966, 63, 26, 21, 595, 325, 521, 636, 481, 485,
664, 897, 151, 132, 969, 967, 856, 953, 425],
[352, 849, 157, 520, 272, 9, 934, 441, 261, 380,
868, 260, 375, 547, 699, 924, 794, 617, 222],
[559, 901, 435, 537, 620, 779, 708, 848, 903,
701, 570, 115, 114, 342, 57, 878, 278, 697,
629],
[541, 502, 381, 168, 792, 268, 21, 59, 581, 691,
695, 906, 616, 808, 366, 804, 36, 210, 295],
[662, 142, 571, 207, 905, 913, 414, 341, 956,
602, 115, 78, 688, 67, 148, 92, 930, 68, 258],
[301, 970, 837, 91, 679, 574, 119, 324, 554,
233, 617, 382, 876, 516, 380, 584, 516, 911,
331],
[894, 637, 193, 54, 14, 503, 221, 127, 118, 565,
234, 828, 753, 97, 257, 619, 811, 803, 934]]),
[385, 928, 460, 539, 984, 516, 609, 769, 825, 857, 819,
422, 989, 319, 60, 450, 495, 64, 624, 410, 798, 331,
58, 859, 509, 475, 328, 93, 699, 836, 425, 222, 629,
295, 258, 331, 934, 803, 811, 619, 257, 97, 753, 828,
234, 565, 118, 127, 221, 503, 14, 54, 193, 637, 894,
301, 662, 541, 559, 352, 966, 777, 21, 743, 645, 243,
161, 869, 529, 564, 703, 604, 59, 272, 470, 997, 980,
563, 632, 353, 366, 750, 740, 395, 978, 995, 848, 72,
820, 471, 278, 875, 874, 223, 391, 505, 763, 41, 86,
953, 617, 697, 210, 68, 911, 516, 584, 380, 516, 876,
382, 617, 233, 554, 324, 119, 574, 679, 91, 837, 970,
142, 502, 901, 849, 63, 779, 927, 74, 767, 645, 579,
349, 354, 846, 427, 351, 469, 685, 297, 362, 947, 998,
434, 896, 773, 441, 562, 785, 704, 529, 398, 754, 307,
660, 292, 537, 68, 799, 472, 856, 794, 278, 36, 930,
92, 148, 67, 688, 78, 115, 602, 956, 341, 414, 913,
905, 207, 571, 381, 435, 157, 26, 755, 770, 374, 117,
359, 865, 328, 643, 756, 916, 435, 184, 785, 930, 349,
161, 253, 365, 82, 976, 499, 461, 188, 22, 204, 986,
459, 345, 59, 874, 967, 924, 878, 804, 366, 808, 616,
906, 695, 691, 581, 59, 21, 268, 792, 168, 537, 520,
21, 647, 760, 226, 907, 427, 703, 585, 338, 772, 629,
726, 296, 672, 282, 268, 741, 330, 272, 217, 534, 704,
811, 219, 490, 337, 877, 969, 699, 57, 342, 114, 115,
570, 701, 903, 848, 708, 779, 620, 272, 595, 718, 75,
356, 321, 636, 276, 442, 812, 645, 854, 317, 437, 314,
343, 571, 202, 416, 715, 659, 277, 676, 140, 132, 547,
375, 260, 868, 380, 261, 441, 934, 9, 325, 144, 751,
28, 906, 193, 889, 374, 792, 123, 668, 970, 737, 846,
352, 425, 308, 603, 391, 151, 897, 664, 485, 481, 636,
521, 749, 387, 5, 592, 663, 857, 712, 510, 367, 862,
519, 123, 196, 936, 552, 233, 282, 35, 686, 215, 508,
647, 289, 307, 18, 327, 108, 366, 459, 232])
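# The tests below exercise a snail() function that is not shown in this
# excerpt. A minimal sketch of one way it could be implemented (an
# assumption about the behaviour under test, not the actual implementation):
# repeatedly take the top row, then rotate the remainder counter-clockwise.
#
#     def snail(matrix):
#         result = []
#         while matrix and matrix[0]:
#             result += matrix.pop(0)  # take the top row, left to right
#             # transpose + reverse rows == rotate the rest counter-clockwise
#             matrix = [list(row) for row in zip(*matrix)][::-1]
#         return result
#
#     snail([[1, 2, 3], [4, 5, 6], [7, 8, 9]])  # -> [1, 2, 3, 6, 9, 8, 7, 4, 5]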
def test_snail_077(self):
self.assertEqual(snail([[666, 962, 235, 436, 68, 11, 222, 412, 346, 108,
83, 505, 615, 899, 111, 149, 740, 452, 988,
476],
[546, 18, 303, 148, 420, 385, 556, 547, 944,
980, 346, 821, 402, 114, 287, 328, 884, 420,
476, 327],
[586, 711, 282, 581, 620, 649, 276, 979, 359,
916, 897, 797, 676, 359, 510, 229, 621, 782,
559, 406],
[888, 758, 801, 266, 597, 509, 541, 501, 301,
109, 298, 676, 542, 803, 434, 40, 601, 224, 72,
387],
[402, 960, 825, 515, 400, 282, 102, 787, 226,
256, 446, 116, 926, 868, 497, 885, 645, 228,
37, 263],
[589, 332, 700, 507, 657, 509, 28, 46, 60, 615,
43, 439, 545, 382, 249, 1, 511, 411, 369, 336],
[470, 14, 533, 919, 248, 40, 292, 559, 970, 850,
609, 202, 315, 100, 52, 467, 332, 666, 620,
145],
[117, 906, 282, 526, 168, 206, 689, 213, 207,
78, 270, 186, 877, 744, 191, 86, 56, 626, 47,
777],
[491, 902, 689, 519, 278, 647, 890, 903, 351,
125, 873, 92, 510, 765, 213, 298, 972, 42, 667,
61],
[689, 759, 825, 676, 249, 697, 684, 112, 347,
73, 863, 91, 150, 311, 140, 814, 984, 838, 458,
505],
[176, 115, 727, 603, 981, 695, 255, 165, 433,
82, 576, 392, 401, 736, 469, 685, 684, 473,
599, 275],
[400, 527, 489, 949, 267, 523, 711, 642, 204,
140, 298, 162, 730, 26, 745, 748, 641, 378,
187, 208],
[424, 742, 633, 608, 645, 642, 876, 276, 408,
985, 695, 3, 772, 967, 436, 422, 333, 626, 980,
279],
[363, 401, 873, 167, 355, 259, 678, 424, 558,
957, 171, 284, 664, 517, 855, 849, 112, 470,
331, 112],
[353, 257, 463, 706, 552, 957, 255, 596, 453,
950, 352, 914, 493, 798, 735, 633, 747, 552,
368, 547],
[524, 688, 975, 145, 704, 232, 190, 483, 617,
262, 882, 782, 5, 345, 285, 483, 325, 321, 866,
806],
[99, 972, 262, 332, 81, 103, 425, 156, 240, 599,
508, 755, 783, 585, 354, 515, 694, 638, 22,
815],
[789, 616, 172, 544, 827, 862, 286, 844, 376,
844, 508, 320, 675, 197, 350, 545, 505, 78,
155, 606],
[203, 167, 992, 723, 682, 83, 534, 315, 376, 89,
267, 107, 346, 924, 306, 752, 627, 496, 994,
613],
[581, 737, 393, 879, 406, 15, 265, 238, 125,
683, 505, 835, 174, 509, 284, 12, 364, 345,
395, 1]]),
[666, 962, 235, 436, 68, 11, 222, 412, 346, 108, 83,
505, 615, 899, 111, 149, 740, 452, 988, 476, 327, 406,
387, 263, 336, 145, 777, 61, 505, 275, 208, 279, 112,
547, 806, 815, 606, 613, 1, 395, 345, 364, 12, 284,
509, 174, 835, 505, 683, 125, 238, 265, 15, 406, 879,
393, 737, 581, 203, 789, 99, 524, 353, 363, 424, 400,
176, 689, 491, 117, 470, 589, 402, 888, 586, 546, 18,
303, 148, 420, 385, 556, 547, 944, 980, 346, 821, 402,
114, 287, 328, 884, 420, 476, 559, 72, 37, 369, 620,
47, 667, 458, 599, 187, 980, 331, 368, 866, 22, 155,
994, 496, 627, 752, 306, 924, 346, 107, 267, 89, 376,
315, 534, 83, 682, 723, 992, 167, 616, 972, 688, 257,
401, 742, 527, 115, 759, 902, 906, 14, 332, 960, 758,
711, 282, 581, 620, 649, 276, 979, 359, 916, 897, 797,
676, 359, 510, 229, 621, 782, 224, 228, 411, 666, 626,
42, 838, 473, 378, 626, 470, 552, 321, 638, 78, 505,
545, 350, 197, 675, 320, 508, 844, 376, 844, 286, 862,
827, 544, 172, 262, 975, 463, 873, 633, 489, 727, 825,
689, 282, 533, 700, 825, 801, 266, 597, 509, 541, 501,
301, 109, 298, 676, 542, 803, 434, 40, 601, 645, 511,
332, 56, 972, 984, 684, 641, 333, 112, 747, 325, 694,
515, 354, 585,
used as the '
'file\n'
'for output.\n',
'raise': '\n'
'The "raise" statement\n'
'*********************\n'
'\n'
' raise_stmt ::= "raise" [expression ["," expression ["," '
'expression]]]\n'
'\n'
'If no expressions are present, "raise" re-raises the last '
'exception\n'
'that was active in the current scope. If no exception is active '
'in\n'
'the current scope, a "TypeError" exception is raised indicating '
'that\n'
'this is an error (if running under IDLE, a "Queue.Empty" exception '
'is\n'
'raised instead).\n'
'\n'
'Otherwise, "raise" evaluates the expressions to get three objects,\n'
'using "None" as the value of omitted expressions. The first two\n'
'objects are used to determine the *type* and *value* of the '
'exception.\n'
'\n'
'If the first object is an instance, the type of the exception is '
'the\n'
'class of the instance, the instance itself is the value, and the\n'
'second object must be "None".\n'
'\n'
'If the first object is a class, it becomes the type of the '
'exception.\n'
'The second object is used to determine the exception value: If it '
'is\n'
'an instance of the class, the instance becomes the exception value. '
'If\n'
'the second object is a tuple, it is used as the argument list for '
'the\n'
'class constructor; if it is "None", an empty argument list is '
'used,\n'
'and any other object is treated as a single argument to the\n'
'constructor. The instance so created by calling the constructor '
'is\n'
'used as the exception value.\n'
'\n'
'If a third object is present and not "None", it must be a '
'traceback\n'
'object (see section The standard type hierarchy), and it is\n'
'substituted instead of the current location as the place where the\n'
'exception occurred. If the third object is present and not a\n'
'traceback object or "None", a "TypeError" exception is raised. '
'The\n'
'three-expression form of "raise" is useful to re-raise an '
'exception\n'
'transparently in an except clause, but "raise" with no expressions\n'
'should be preferred if the exception to be re-raised was the most\n'
'recently active exception in the current scope.\n'
'\n'
'Additional information on exceptions can be found in section\n'
'Exceptions, and information about handling exceptions is in '
'section\n'
'The try statement.\n',
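# Illustrative sketch (not part of the generated topics text): the
# three-expression Python 2 "raise" form described above, re-raising an
# exception with its original traceback. The helper name is hypothetical.
#
#     import sys
#
#     def parse_record(record):
#         try:
#             return int(record)
#         except ValueError:
#             exc_type, exc_value, exc_tb = sys.exc_info()
#             # Re-raise transparently, preserving the original traceback.
#             raise exc_type, exc_value, exc_tb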
'return': '\n'
'The "return" statement\n'
'**********************\n'
'\n'
' return_stmt ::= "return" [expression_list]\n'
'\n'
'"return" may only occur syntactically nested in a function '
'definition,\n'
'not within a nested class definition.\n'
'\n'
'If an expression list is present, it is evaluated, else "None" is\n'
'substituted.\n'
'\n'
'"return" leaves the current function call with the expression list '
'(or\n'
'"None") as return value.\n'
'\n'
'When "return" passes control out of a "try" statement with a '
'"finally"\n'
'clause, that "finally" clause is executed before really leaving '
'the\n'
'function.\n'
'\n'
'In a generator function, the "return" statement is not allowed to\n'
'include an "expression_list". In that context, a bare "return"\n'
'indicates that the generator is done and will cause '
'"StopIteration" to\n'
'be raised.\n',
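# Illustrative sketch (not part of the generated topics text): a bare
# "return" inside a generator function signals that the generator is done,
# as described above. The generator name is hypothetical.
#
#     def first_n_positive(values, n):
#         count = 0
#         for v in values:
#             if count == n:
#                 return  # bare return: causes StopIteration in the consumer
#             if v > 0:
#                 count += 1
#                 yield v
#
#     list(first_n_positive([3, -1, 2, 5], 2))  # -> [3, 2]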
'sequence-types': '\n'
'Emulating container types\n'
'*************************\n'
'\n'
'The following methods can be defined to implement '
'container objects.\n'
'Containers usually are sequences (such as lists or tuples) '
'or mappings\n'
'(like dictionaries), but can represent other containers as '
'well. The\n'
'first set of methods is used either to emulate a sequence '
'or to\n'
'emulate a mapping; the difference is that for a sequence, '
'the\n'
'allowable keys should be the integers *k* for which "0 <= '
'k < N" where\n'
'*N* is the length of the sequence, or slice objects, which '
'define a\n'
'range of items. (For backwards compatibility, the method\n'
'"__getslice__()" (see below) can also be defined to handle '
'simple, but\n'
'not extended slices.) It is also recommended that mappings '
'provide the\n'
'methods "keys()", "values()", "items()", "has_key()", '
'"get()",\n'
'"clear()", "setdefault()", "iterkeys()", "itervalues()",\n'
'"iteritems()", "pop()", "popitem()", "copy()", and '
'"update()" behaving\n'
"similar to those for Python's standard dictionary "
'objects. The\n'
'"UserDict" module provides a "DictMixin" class to help '
'create those\n'
'methods from a base set of "__getitem__()", '
'"__setitem__()",\n'
'"__delitem__()", and "keys()". Mutable sequences should '
'provide\n'
'methods "append()", "count()", "index()", "extend()", '
'"insert()",\n'
'"pop()", "remove()", "reverse()" and "sort()", like Python '
'standard\n'
'list objects. Finally, sequence types should implement '
'addition\n'
'(meaning concatenation) and multiplication (meaning '
'repetition) by\n'
'defining the methods "__add__()", "__radd__()", '
'"__iadd__()",\n'
'"__mul__()", "__rmul__()" and "__imul__()" described '
'below; they\n'
'should not define "__coerce__()" or other numerical '
'operators. It is\n'
'recommended that both mappings and sequences implement '
'the\n'
'"__contains__()" method to allow efficient use of the "in" '
'operator;\n'
'for mappings, "in" should be equivalent of "has_key()"; '
'for sequences,\n'
'it should search through the values. It is further '
'recommended that\n'
'both mappings and sequences implement the "__iter__()" '
'method to allow\n'
'efficient iteration through the container; for mappings, '
'"__iter__()"\n'
'should be the same as "iterkeys()"; for sequences, it '
'should iterate\n'
'through the values.\n'
'\n'
'object.__len__(self)\n'
'\n'
' Called to implement the built-in function "len()". '
'Should return\n'
' the length of the object, an integer ">=" 0. Also, an '
'object that\n'
' doesn\'t define a "__nonzero__()" method and whose '
'"__len__()"\n'
' method returns zero is considered to be false in a '
'Boolean context.\n'
'\n'
' **CPython implementation detail:** In CPython, the '
'length is\n'
' required to be at most "sys.maxsize". If the length is '
'larger than\n'
' "sys.maxsize" some features (such as "len()") may '
'raise\n'
' "OverflowError". To prevent raising "OverflowError" by '
'truth value\n'
' testing, an object must define a "__nonzero__()" '
'method.\n'
'\n'
'object.__getitem__(self, key)\n'
'\n'
' Called to implement evaluation of "self[key]". For '
'sequence types,\n'
' the accepted keys should be integers and slice '
'objects. Note that\n'
' the special interpretation of negative indexes (if the '
'class wishes\n'
' to emulate a sequence type) is up to the '
'"__getitem__()" method. If\n'
' *key* is of an inappropriate type, "TypeError" may be '
'raised; if of\n'
' a value outside the set of indexes for the sequence '
'(after any\n'
' special interpretation of negative values), '
'"IndexError" should be\n'
' raised. For mapping types, if *key* is missing (not in '
'the\n'
' container), "KeyError" should be raised.\n'
'\n'
' Note: "for" loops expect that an "IndexError" will be '
'raised for\n'
' illegal indexes to allow proper detection of the end '
'of the\n'
' sequence.\n'
'\n'
'object.__missing__(self, key)\n'
'\n'
' Called by "dict"."__getitem__()" to implement '
'"self[key]" for dict\n'
' subclasses when key is not in the dictionary.\n'
'\n'
'object.__setitem__(self, key, value)\n'
'\n'
' Called to implement assignment to "self[key]". Same '
'note as for\n'
' "__getitem__()". This should only be implemented for '
'mappings if\n'
' the objects support changes to the values for keys, or '
'if new keys\n'
' can be added, or for sequences if elements can be '
'replaced. The\n'
' same exceptions should be raised for improper *key* '
'values as for\n'
' the "__getitem__()" method.\n'
'\n'
'object.__delitem__(self, key)\n'
'\n'
' Called to implement deletion of "self[key]". Same note '
'as for\n'
' "__getitem__()". This should only be implemented for '
'mappings if\n'
' the objects support removal of keys, or for sequences '
'if elements\n'
' can be removed from the sequence. The same exceptions '
'should be\n'
' raised for improper *key* values as for the '
'"__getitem__()" method.\n'
'\n'
'object.__iter__(self)\n'
'\n'
' This method is called when an iterator is required for '
'a container.\n'
' This method should return a new iterator object that '
'can iterate\n'
' over all the objects in the container. For mappings, '
'it should\n'
' iterate over the keys of the container, and should also '
'be made\n'
' available as
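# Illustrative sketch (not part of the generated topics text): a minimal
# mapping-style container using the special methods described above. The
# class name is hypothetical.
#
#     class LowerDict(object):
#         """Mapping that normalises string keys to lower case."""
#         def __init__(self):
#             self._data = {}
#         def __len__(self):
#             return len(self._data)
#         def __getitem__(self, key):
#             return self._data[key.lower()]   # KeyError if the key is missing
#         def __setitem__(self, key, value):
#             self._data[key.lower()] = value
#         def __delitem__(self, key):
#             del self._data[key.lower()]
#         def __iter__(self):
#             return iter(self._data)          # iterate over the keys
#         def __contains__(self, key):
#             return key.lower() in self._data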
from collections import OrderedDict
from django import forms
from django.contrib import admin
from django.core.paginator import Paginator
from django.db.models import Count, Q, Prefetch
from django.template import loader
from django.utils.translation import ugettext
from rangefilter.filter import (
DateRangeFilter as DateRangeFilterBase,
)
from olympia import amo
from olympia.access import acl
from olympia.addons.models import Addon, AddonApprovalsCounter
from olympia.amo.admin import CommaSearchInAdminMixin
from olympia.ratings.models import Rating
from olympia.translations.utils import truncate_text
from .models import AbuseReport
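# Admin integration for abuse reports: custom list filters (report type,
# creation date range, minimum reports count grouped by guid) plus a mostly
# read-only ModelAdmin used to triage AbuseReport entries.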
class AbuseReportTypeFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = ugettext('type')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'type'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('user', ugettext('Users')),
('addon', ugettext('Addons')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'user':
return queryset.filter(user__isnull=False)
elif self.value() == 'addon':
return queryset.filter(Q(addon__isnull=False) |
Q(guid__isnull=False))
return queryset
class FakeChoicesMixin(object):
def choices(self, changelist):
"""
Fake choices method (we don't need one, we don't really have choices
for this filter, it's an input widget) that fetches the params and the
current values for other filters, so that we can feed that into
the form that our template displays.
(We don't control the data passed down to the template, so re-using
this one is our only option)
"""
# Grab search query parts and filter query parts as tuples of tuples.
search_query_parts = (
((admin.views.main.SEARCH_VAR, changelist.query),)
) if changelist.query else ()
filters_query_parts = tuple(
(k, v)
for k, v in changelist.get_filters_params().items()
if k not in self.expected_parameters()
)
# Assemble them into a `query_parts` property on a unique fake choice.
all_choice = next(super().choices(changelist))
all_choice['query_parts'] = search_query_parts + filters_query_parts
yield all_choice
class MinimumReportsCountFilter(FakeChoicesMixin, admin.SimpleListFilter):
"""
Custom filter for minimum reports count param.
Does *not* do the actual filtering of the queryset, as it needs to be done
with an aggregate query after all filters have been applied. That part is
implemented in the model admin, see AbuseReportAdmin.get_search_results().
Needs FakeChoicesMixin for the fake choices the template will be using.
Original idea:
https://hakibenita.com/how-to-add-a-text-filter-to-django-admin
"""
template = 'admin/abuse/abusereport/minimum_reports_count_filter.html'
title = ugettext('minimum reports count (grouped by guid)')
parameter_name = 'minimum_reports_count'
def lookups(self, request, model_admin):
"""
Fake lookups() method required to show the filter.
"""
return ((),)
def queryset(self, request, queryset):
return queryset
class HTML5DateInput(forms.DateInput):
format_key = 'DATE_INPUT_FORMATS'
input_type = 'date'
class DateRangeFilter(FakeChoicesMixin, DateRangeFilterBase):
"""
Custom rangefilter.filters.DateTimeRangeFilter class that uses HTML5
widgets and a template without the need for inline CSS/JavaScript.
Needs FakeChoicesMixin for the fake choices the template will be using (the
upstream implementation depends on JavaScript for this).
"""
template = 'admin/abuse/abusereport/date_range_filter.html'
title = ugettext('creation date')
def _get_form_fields(self):
return OrderedDict((
(self.lookup_kwarg_gte, forms.DateField(
label='From',
widget=HTML5DateInput(),
localize=True,
required=False
)),
(self.lookup_kwarg_lte, forms.DateField(
label='To',
widget=HTML5DateInput(),
localize=True,
required=False
)),
))
def choices(self, changelist):
# We want a fake 'All' choice as per FakeChoicesMixin, but as of 0.3.15
# rangefilter's implementation doesn't bother setting the selected
# property, and our mixin calls super(), so we have to do it here.
all_choice = next(super().choices(changelist))
all_choice['selected'] = not any(self.used_parameters)
yield all_choice
class AbuseReportAdmin(CommaSearchInAdminMixin, admin.ModelAdmin):
class Media:
css = {
'all': ('css/admin/abuse_reports.css',)
}
actions = ('delete_selected', 'mark_as_valid', 'mark_as_suspicious')
date_hierarchy = 'modified'
list_display = ('target_name', 'guid', 'type', 'state', 'distribution',
'reason', 'message_excerpt', 'created')
list_filter = (
AbuseReportTypeFilter,
'state',
'reason',
('created', DateRangeFilter),
MinimumReportsCountFilter,
)
list_select_related = ('user',) # For `addon` see get_queryset() below.
# Shouldn't be needed because those fields should all be readonly, but just
# in case we change our mind, FKs should be raw id fields as usual in our
# admin tools.
raw_id_fields = ('addon', 'user', 'reporter')
# All fields except state must be readonly - the submitted data should
# not be changed, only the state for triage.
readonly_fields = (
'created',
'modified',
'reporter',
'country_code',
'addon',
'guid',
'user',
'message',
'client_id',
'addon_name',
'addon_summary',
'addon_version',
'addon_signature',
'application',
'application_version',
'application_locale',
'operating_system',
'operating_system_version',
'install_date',
'addon_install_origin',
'addon_install_method',
'addon_install_source',
'addon_install_source_url',
'report_entry_point',
'addon_card',
)
ADDON_METADATA_FIELDSET = 'Add-on metadata'
fieldsets = (
(None, {'fields': ('state', 'reason', 'message')}),
(None, {'fields': (
'created',
'modified',
'reporter',
'country_code',
'client_id',
'addon_signature',
'application',
'application_version',
'application_locale',
'operating_system',
'operating_system_version',
'install_date',
'addon_install_origin',
'addon_install_method',
'addon_install_source',
'addon_install_source_url',
'report_entry_point'
)})
)
# The first fieldset is going to be dynamically added through
# get_fieldsets() depending on the target (add-on, user or unknown add-on),
# using the fields below:
dynamic_fieldset_fields = {
# Known add-on in database
'addon': ('addon_card',),
# User
'user': ('user',),
# Unknown add-on, we only have the guid and maybe some extra addon_*
# fields that were submitted with the report.
'guid': ('addon_name', 'addon_version', 'guid', 'addon_summary'),
}
view_on_site = False # Abuse reports have no public page to link to.
def has_add_permission(self, request):
# Adding new abuse reports through the admin is useless, so we prevent
# it.
return False
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['show_save_and_continue'] = False # Don't need this.
return super().change_view(
request, object_id, form_url, extra_context=extra_context,
)
def delete_queryset(self, request, queryset):
"""Given a queryset, soft-delete it from the database."""
queryset.update(state=AbuseReport.STATES.DELETED)
def get_actions(self, request):
actions = super().get_actions(request)
if not acl.action_allowed(request, amo.permissions.ABUSEREPORTS_EDIT):
# You need AbuseReports:Edit for the extra actions.
actions.pop('mark_as_valid')
actions.pop('mark_as_suspicious')
return actions
def get_search_fields(self, request):
"""
Return search fields according to the type filter.
"""
type_ = request.GET.get('type')
if type_ == 'addon':
search_fields = (
'addon__name__localized_string', 'addon__slug', 'addon_name',
'=guid', 'message', '=addon__id',
)
elif type_ == 'user':
search_fields = (
'message', '=user__id', '^user__username', '^user__email',
)
else:
search_fields = ()
return search_fields
def get_search_id_field(self, request):
"""
Return the field to use when all search terms are numeric, according to
the type filter.
"""
type_ = request.GET.get('type')
if type_ == 'addon':
search_field = 'addon_id'
elif type_ == 'user':
search_field = 'user_id'
else:
search_field = super().get_search_id_field(request)
return search_field
def get_search_results(self, request, qs, search_term):
"""
Custom get_search_results() method that handles minimum_reports_count.
"""
minimum_reports_count = request.GET.get('minimum_reports_count')
if minimum_reports_count:
# minimum_reports_count has its own custom filter class but the
# filtering is actually done here, because it needs to happen after
# all other filters have been applied in order for the aggregate
# queryset to be correct.
guids = (qs.values_list('guid', flat=True)
.filter(guid__isnull=False)
.annotate(Count('guid'))
.filter(guid__count__gte=minimum_reports_count)
.order_by())
qs = qs.filter(guid__in=list(guids))
qs, use_distinct = super().get_search_results(request, qs, search_term)
return qs, use_distinct
def get_queryset(self, request):
qs = super().get_queryset(request)
# Minimize number of queries: for users linked to abuse reports, we
# don't have transformers, so we can directly make a JOIN, and that's
# taken care of by list_select_related. For addons, we want the
# translations transformer, so the most efficient way to load them is
# through prefetch_related() + only_translations() (we don't care about
# the other transforms).
return qs.prefetch_related(
Prefetch(
'addon', queryset=Addon.objects.all().only_translations()),
)
def get_fieldsets(self, request, obj=None):
if obj.addon:
target = 'addon'
elif obj.user:
target = 'user'
else:
target = 'guid'
dynamic_fieldset = (
(None, {'fields': self.dynamic_fieldset_fields[target]}),
)
return dynamic_fieldset + self.fieldsets
def target_name(self, obj):
name = obj.target.name if obj.target else obj.addon_name
return '%s %s' % (name, obj.addon_version or '')
target_name.short_description = ugettext('User / Add-on')
def addon_card(self, obj):
template = loader.get_template('reviewers/addon_details_box.html')
addon = obj.addon
try:
approvals_info = addon.addonapprovalscounter
except AddonApprovalsCounter.DoesNotExist:
approvals_info = None
developers = addon.listed_authors
# Provide all the necessary context addon_details_box.html needs. Note
# the use of Paginator() to match what the template expects.
context = {
'addon': addon,
'addon_name': addon.name,
'approvals_info': approvals_info,
'reports': Paginator(
(AbuseReport.objects
.filter(Q(addon=addon) | Q(user__in=developers))
.order_by('-created')), 5).page(1),
'user_ratings': Paginator(
(Rating.without_replies
.filter(addon=addon, rating__lte=3, body__isnull=False)
.order_by('-created')), 5).page(1),
'version': addon.current_version,
}
return template.render(context)
addon_card.short_description = ''
def distribution(self, obj):
return obj.get_addon_signature_display() if obj.addon_signature else ''
distribution.short_description = ugettext('Distribution')
def reporter_country(self, obj):
return obj.country_code
reporter_country.short_description = ugettext("Reporter's country")
def message_excerpt(self, obj):
return truncate_text(obj.message, 140)[0] if obj.message else ''
message_excerpt.short_description = ugettext('Message excerpt')
def mark_as_valid(self, request, qs):
for obj in qs:
obj.update(state=AbuseReport.STATES.VALID)
self.message_user(
request,
ugettext(
'The %d selected reports have
.conv(1, 256, 1, biased=False, relu=False, name='res4b12_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b12_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b12_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b12_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b12_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b12_branch2c'))
(self.feed('res4b11_relu',
'bn4b12_branch2c')
.add(name='res4b12')
.relu(name='res4b12_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b13_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b13_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b13_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b13_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b13_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b13_branch2c'))
(self.feed('res4b12_relu',
'bn4b13_branch2c')
.add(name='res4b13')
.relu(name='res4b13_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b14_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b14_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b14_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b14_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b14_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b14_branch2c'))
(self.feed('res4b13_relu',
'bn4b14_branch2c')
.add(name='res4b14')
.relu(name='res4b14_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b15_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b15_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b15_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b15_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b15_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b15_branch2c'))
(self.feed('res4b14_relu',
'bn4b15_branch2c')
.add(name='res4b15')
.relu(name='res4b15_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b16_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b16_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b16_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b16_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b16_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b16_branch2c'))
(self.feed('res4b15_relu',
'bn4b16_branch2c')
.add(name='res4b16')
.relu(name='res4b16_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b17_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b17_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b17_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b17_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b17_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b17_branch2c'))
(self.feed('res4b16_relu',
'bn4b17_branch2c')
.add(name='res4b17')
.relu(name='res4b17_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b18_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b18_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b18_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b18_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b18_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b18_branch2c'))
(self.feed('res4b17_relu',
'bn4b18_branch2c')
.add(name='res4b18')
.relu(name='res4b18_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b19_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b19_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b19_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b19_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b19_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b19_branch2c'))
(self.feed('res4b18_relu',
'bn4b19_branch2c')
.add(name='res4b19')
.relu(name='res4b19_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b20_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b20_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b20_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b20_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b20_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b20_branch2c'))
(self.feed('res4b19_relu',
'bn4b20_branch2c')
.add(name='res4b20')
.relu(name='res4b20_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b21_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b21_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b21_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b21_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b21_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b21_branch2c'))
(self.feed('res4b20_relu',
'bn4b21_branch2c')
.add(name='res4b21')
.relu(name='res4b21_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b22_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b22_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b22_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b22_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b22_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b22_branch2c'))
(self.feed('res4b21_relu',
'bn4b22_branch2c')
.add(name='res4b22')
.relu(name='res4b22_relu')
.conv(1, 2048, 2, biased=False, relu=False, name='res5a_branch1')
.batch_normalization(scale=True, center=True, name='bn5a_branch1'))
(self.feed('res4b22_relu')
.conv(1, 512, 2, biased=False, relu=False, name='res5a_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2a')
.conv(3, 512, 1, biased=False, relu=False, name='res5a_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2b')
.conv(1, 2048, 1, biased=False, relu=False, name='res5a_branch2c')
.batch_normalization(scale=True, center=True, name='bn5a_branch2c'))
(self.feed('bn5a_branch1',
'bn5a_branch2c')
.add(name='res5a')
.relu(name='res5a_relu')
.conv(1, 512, 1, biased=False, relu=False, name='res5b_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2a')
.conv(3, 512, 1, biased=False, relu=False, name='res5b_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2b')
.conv(1, 2048, 1, biased=False, relu=False, name='res5b_branch2c')
.batch_normalization(scale=True, center=True, name='bn5b_branch2c'))
(self.feed('res5a_relu',
'bn5b_branch2c')
.add(name='res5b')
.relu(name='res5b_relu')
.conv(1, 512, 1, biased=False, relu=False, name='res5c_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2a')
.conv(3, 512, 1, biased=False, relu=False, name='res5c_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2b')
.conv(1, 2048, 1, biased=False, relu=False, name='res5c_branch2c')
.batch_normalization(scale=True, center=True, name='bn5c_branch2c'))
(self.feed('res5b_relu',
'bn5c_branch2c')
.add(name='res5c')
.relu(name='res5c_relu')
.avg_pool(7, 1, padding='VALID', name='pool5'))
if not self.fcn:
(self.feed('pool5')
.fc(1000, relu=False, name='fc1000')
.softmax(name='prob'))
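# Every chain above follows the same ResNet "bottleneck" pattern:
# 1x1 conv (reduce) -> BN/ReLU -> 3x3 conv -> BN/ReLU -> 1x1 conv (expand) -> BN,
# followed by an element-wise add with the shortcut branch and a final ReLU.
# A hypothetical helper that would emit one such block (a sketch only, not
# part of the original network definition):
#
#     def bottleneck(net, previous, prefix, depth, out_depth):
#         bn_prefix = 'bn' + prefix[len('res'):]
#         (net.feed(previous)
#              .conv(1, depth, 1, biased=False, relu=False, name=prefix + '_branch2a')
#              .batch_normalization(scale=True, center=True, relu=True, name=bn_prefix + '_branch2a')
#              .conv(3, depth, 1, biased=False, relu=False, name=prefix + '_branch2b')
#              .batch_normalization(scale=True, center=True, relu=True, name=bn_prefix + '_branch2b')
#              .conv(1, out_depth, 1, biased=False, relu=False, name=prefix + '_branch2c')
#              .batch_normalization(scale=True, center=True, name=bn_prefix + '_branch2c'))
#         (net.feed(previous, bn_prefix + '_branch2c')
#              .add(name=prefix)
#              .relu(name=prefix + '_relu'))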
class ResNet152(Network):
def setup(self):
(self.feed('data')
.conv(7, 64, 2, biased=False, relu=False, name='conv1')
.batch_normalization(scale=True, center=True, relu=True, name='bn_conv1')
.max_pool(3, 2, name='pool1')
.conv(1, 256, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(scale=True, center=True, name='bn2a_branch1'))
(self.feed('pool1')
.conv(1, 64, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2a')
.conv(3, 64, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2b')
.conv(1, 256, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(scale=True, center=True, name='bn2a_branch2c'))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 64, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2a')
.conv(3, 64, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2b')
.conv(1, 256, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(scale=True, center=True, name='bn2b_branch2c'))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 64, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2a')
.conv(3, 64, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2b')
.conv(1, 256, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(scale=True, center=True, name='bn2c_branch2c'))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 512, 2, biased=False, relu=False, name='res3a_branch1')
.batch_normalization(scale=True, center=True, name='bn3a_branch1'))
(self.feed('res2c_relu')
.conv(1, 128, 2, biased=False, relu=False, name='res3a_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(scale=True, center=True, name='bn3a_branch2c'))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b1_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b1_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b1_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b1_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b1_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b1_branch2c'))
(self.feed('res3a_relu',
'bn3b1_branch2c')
.add(name='res3b1')
.relu(name='res3b1_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b2_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b2_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b2_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b2_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b2_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b2_branch2c'))
(self.feed('res3b1_relu',
'bn3b2_branch2c')
.add(name='res3b2')
.relu(name='res3b2_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b3_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b3_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b3_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b3_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b3_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b3_branch2c'))
(self.feed('res3b2_relu',
'bn3b3_branch2c')
.add(name='res3b3')
.relu(name='res3b3_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b4_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b4_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b4_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b4_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b4_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b4_branch2c'))
(self.feed('res3b3_relu',
'bn3b4_branch2c')
.add(name='res3b4')
.relu(name='res3b4_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b5_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b5_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b5_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b5_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b5_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b5_branch2c'))
(self.feed('res3b4_relu',
'bn3b5_branch2c')
.add(name='res3b5')
.relu(name='res3b5_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b6_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b6_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b6_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b6_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b6_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b6_branch2c'))
(self.feed('res3b5_relu',
'bn3b6_branch2c')
.add(name='res3b6')
.relu(name='res3b6_relu')
.conv(1, 128, 1, biased=False, relu=False, name='res3b7_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b7_branch2a')
.conv(3, 128, 1, biased=False, relu=False, name='res3b7_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn3b7_branch2b')
.conv(1, 512, 1, biased=False, relu=False, name='res3b7_branch2c')
.batch_normalization(scale=True, center=True, name='bn3b7_branch2c'))
(self.feed('res3b6_relu',
'bn3b7_branch2c')
.add(name='res3b7')
.relu(name='res3b7_relu')
.conv(1, 1024, 2, biased=False, relu=False, name='res4a_branch1')
.batch_normalization(scale=True, center=True, name='bn4a_branch1'))
(self.feed('res3b7_relu')
.conv(1, 256, 2, biased=False, relu=False, name='res4a_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(scale=True, center=True, name='bn4a_branch2c'))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b1_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b1_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b1_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b1_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b1_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b1_branch2c'))
(self.feed('res4a_relu',
'bn4b1_branch2c')
.add(name='res4b1')
.relu(name='res4b1_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b2_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b2_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b2_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b2_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b2_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b2_branch2c'))
(self.feed('res4b1_relu',
'bn4b2_branch2c')
.add(name='res4b2')
.relu(name='res4b2_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b3_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b3_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b3_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b3_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b3_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b3_branch2c'))
(self.feed('res4b2_relu',
'bn4b3_branch2c')
.add(name='res4b3')
.relu(name='res4b3_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b4_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b4_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b4_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b4_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b4_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b4_branch2c'))
(self.feed('res4b3_relu',
'bn4b4_branch2c')
.add(name='res4b4')
.relu(name='res4b4_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b5_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b5_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b5_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b5_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b5_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b5_branch2c'))
(self.feed('res4b4_relu',
'bn4b5_branch2c')
.add(name='res4b5')
.relu(name='res4b5_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b6_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b6_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b6_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b6_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b6_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b6_branch2c'))
(self.feed('res4b5_relu',
'bn4b6_branch2c')
.add(name='res4b6')
.relu(name='res4b6_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b7_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b7_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b7_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b7_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b7_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b7_branch2c'))
(self.feed('res4b6_relu',
'bn4b7_branch2c')
.add(name='res4b7')
.relu(name='res4b7_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b8_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b8_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b8_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b8_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b8_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b8_branch2c'))
(self.feed('res4b7_relu',
'bn4b8_branch2c')
.add(name='res4b8')
.relu(name='res4b8_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b9_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b9_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b9_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b9_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b9_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b9_branch2c'))
(self.feed('res4b8_relu',
'bn4b9_branch2c')
.add(name='res4b9')
.relu(name='res4b9_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b10_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b10_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b10_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b10_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b10_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b10_branch2c'))
(self.feed('res4b9_relu',
'bn4b10_branch2c')
.add(name='res4b10')
.relu(name='res4b10_relu')
.conv(1, 256, 1, biased=False, relu=False, name='res4b11_branch2a')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b11_branch2a')
.conv(3, 256, 1, biased=False, relu=False, name='res4b11_branch2b')
.batch_normalization(scale=True, center=True, relu=True, name='bn4b11_branch2b')
.conv(1, 1024, 1, biased=False, relu=False, name='res4b11_branch2c')
.batch_normalization(scale=True, center=True, name='bn4b11_branch2c'))
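# Layout note (summarizing the chain above): each bottleneck block is
# 1x1 conv (reduce) -> BN+ReLU -> 3x3 conv -> BN+ReLU -> 1x1 conv (expand to 4x channels) -> BN,
# then added to the block input (or to a strided 1x1 'branch1' projection at the first
# block of each stage) and passed through a final ReLU.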
if _newclass:hostInterfaceMajorVersion = _swig_property(_ok.okTDeviceInfo_hostInterfaceMajorVersion_get, _ok.okTDeviceInfo_hostInterfaceMajorVersion_set)
__swig_setmethods__["hostInterfaceMinorVersion"] = _ok.okTDeviceInfo_hostInterfaceMinorVersion_set
__swig_getmethods__["hostInterfaceMinorVersion"] = _ok.okTDeviceInfo_hostInterfaceMinorVersion_get
if _newclass:hostInterfaceMinorVersion = _swig_property(_ok.okTDeviceInfo_hostInterfaceMinorVersion_get, _ok.okTDeviceInfo_hostInterfaceMinorVersion_set)
__swig_setmethods__["isPLL22150Supported"] = _ok.okTDeviceInfo_isPLL22150Supported_set
__swig_getmethods__["isPLL22150Supported"] = _ok.okTDeviceInfo_isPLL22150Supported_get
if _newclass:isPLL22150Supported = _swig_property(_ok.okTDeviceInfo_isPLL22150Supported_get, _ok.okTDeviceInfo_isPLL22150Supported_set)
__swig_setmethods__["isPLL22393Supported"] = _ok.okTDeviceInfo_isPLL22393Supported_set
__swig_getmethods__["isPLL22393Supported"] = _ok.okTDeviceInfo_isPLL22393Supported_get
if _newclass:isPLL22393Supported = _swig_property(_ok.okTDeviceInfo_isPLL22393Supported_get, _ok.okTDeviceInfo_isPLL22393Supported_set)
__swig_setmethods__["isFrontPanelEnabled"] = _ok.okTDeviceInfo_isFrontPanelEnabled_set
__swig_getmethods__["isFrontPanelEnabled"] = _ok.okTDeviceInfo_isFrontPanelEnabled_get
if _newclass:isFrontPanelEnabled = _swig_property(_ok.okTDeviceInfo_isFrontPanelEnabled_get, _ok.okTDeviceInfo_isFrontPanelEnabled_set)
__swig_setmethods__["wireWidth"] = _ok.okTDeviceInfo_wireWidth_set
__swig_getmethods__["wireWidth"] = _ok.okTDeviceInfo_wireWidth_get
if _newclass:wireWidth = _swig_property(_ok.okTDeviceInfo_wireWidth_get, _ok.okTDeviceInfo_wireWidth_set)
__swig_setmethods__["triggerWidth"] = _ok.okTDeviceInfo_triggerWidth_set
__swig_getmethods__["triggerWidth"] = _ok.okTDeviceInfo_triggerWidth_get
if _newclass:triggerWidth = _swig_property(_ok.okTDeviceInfo_triggerWidth_get, _ok.okTDeviceInfo_triggerWidth_set)
__swig_setmethods__["pipeWidth"] = _ok.okTDeviceInfo_pipeWidth_set
__swig_getmethods__["pipeWidth"] = _ok.okTDeviceInfo_pipeWidth_get
if _newclass:pipeWidth = _swig_property(_ok.okTDeviceInfo_pipeWidth_get, _ok.okTDeviceInfo_pipeWidth_set)
__swig_setmethods__["registerAddressWidth"] = _ok.okTDeviceInfo_registerAddressWidth_set
__swig_getmethods__["registerAddressWidth"] = _ok.okTDeviceInfo_registerAddressWidth_get
if _newclass:registerAddressWidth = _swig_property(_ok.okTDeviceInfo_registerAddressWidth_get, _ok.okTDeviceInfo_registerAddressWidth_set)
__swig_setmethods__["registerDataWidth"] = _ok.okTDeviceInfo_registerDataWidth_set
__swig_getmethods__["registerDataWidth"] = _ok.okTDeviceInfo_registerDataWidth_get
if _newclass:registerDataWidth = _swig_property(_ok.okTDeviceInfo_registerDataWidth_get, _ok.okTDeviceInfo_registerDataWidth_set)
__swig_setmethods__["flashSystem"] = _ok.okTDeviceInfo_flashSystem_set
__swig_getmethods__["flashSystem"] = _ok.okTDeviceInfo_flashSystem_get
if _newclass:flashSystem = _swig_property(_ok.okTDeviceInfo_flashSystem_get, _ok.okTDeviceInfo_flashSystem_set)
__swig_setmethods__["flashFPGA"] = _ok.okTDeviceInfo_flashFPGA_set
__swig_getmethods__["flashFPGA"] = _ok.okTDeviceInfo_flashFPGA_get
if _newclass:flashFPGA = _swig_property(_ok.okTDeviceInfo_flashFPGA_get, _ok.okTDeviceInfo_flashFPGA_set)
__swig_setmethods__["hasFMCEEPROM"] = _ok.okTDeviceInfo_hasFMCEEPROM_set
__swig_getmethods__["hasFMCEEPROM"] = _ok.okTDeviceInfo_hasFMCEEPROM_get
if _newclass:hasFMCEEPROM = _swig_property(_ok.okTDeviceInfo_hasFMCEEPROM_get, _ok.okTDeviceInfo_hasFMCEEPROM_set)
__swig_setmethods__["hasResetProfiles"] = _ok.okTDeviceInfo_hasResetProfiles_set
__swig_getmethods__["hasResetProfiles"] = _ok.okTDeviceInfo_hasResetProfiles_get
if _newclass:hasResetProfiles = _swig_property(_ok.okTDeviceInfo_hasResetProfiles_get, _ok.okTDeviceInfo_hasResetProfiles_set)
def __init__(self):
this = _ok.new_okTDeviceInfo()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ok.delete_okTDeviceInfo
__del__ = lambda self : None;
okTDeviceInfo_swigregister = _ok.okTDeviceInfo_swigregister
okTDeviceInfo_swigregister(okTDeviceInfo)
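# Note on the generated accessors above: for every struct field, SWIG emits a pair of
# module-level <field>_set/<field>_get functions, registers them in the proxy class's
# __swig_setmethods__/__swig_getmethods__ dictionaries (consumed by _swig_setattr/_swig_getattr),
# and, when new-style classes are available (_newclass), also exposes the field as a plain
# Python property via _swig_property.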
class okBuffer(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, okBuffer, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, okBuffer, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _ok.new_okBuffer(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ok.delete_okBuffer
__del__ = lambda self : None;
def IsEmpty(self): return _ok.okBuffer_IsEmpty(self)
def GetSize(self): return _ok.okBuffer_GetSize(self)
def __getitem__(self, *args): return _ok.okBuffer___getitem__(self, *args)
def __setitem__(self, *args): return _ok.okBuffer___setitem__(self, *args)
okBuffer_swigregister = _ok.okBuffer_swigregister
okBuffer_swigregister(okBuffer)
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _ok.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _ok.SwigPyIterator_value(self)
def incr(self, n=1): return _ok.SwigPyIterator_incr(self, n)
def decr(self, n=1): return _ok.SwigPyIterator_decr(self, n)
def distance(self, *args): return _ok.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _ok.SwigPyIterator_equal(self, *args)
def copy(self): return _ok.SwigPyIterator_copy(self)
def next(self): return _ok.SwigPyIterator_next(self)
def __next__(self): return _ok.SwigPyIterator___next__(self)
def previous(self): return _ok.SwigPyIterator_previous(self)
def advance(self, *args): return _ok.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _ok.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _ok.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _ok.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _ok.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _ok.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _ok.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _ok.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class okStrings(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, okStrings, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, okStrings, name)
__repr__ = _swig_repr
def iterator(self): return _ok.okStrings_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _ok.okStrings___nonzero__(self)
def __bool__(self): return _ok.okStrings___bool__(self)
def __len__(self): return _ok.okStrings___len__(self)
def pop(self): return _ok.okStrings_pop(self)
def __getslice__(self, *args): return _ok.okStrings___getslice__(self, *args)
def __setslice__(self, *args): return _ok.okStrings___setslice__(self, *args)
def __delslice__(self, *args): return _ok.okStrings___delslice__(self, *args)
def __delitem__(self, *args): return _ok.okStrings___delitem__(self, *args)
def __getitem__(self, *args): return _ok.okStrings___getitem__(self, *args)
def __setitem__(self, *args): return _ok.okStrings___setitem__(self, *args)
def append(self, *args): return _ok.okStrings_append(self, *args)
def empty(self): return _ok.okStrings_empty(self)
def size(self): return _ok.okStrings_size(self)
def clear(self): return _ok.okStrings_clear(self)
def swap(self, *args): return _ok.okStrings_swap(self, *args)
def get_allocator(self): return _ok.okStrings_get_allocator(self)
def begin(self): return _ok.okStrings_begin(self)
def end(self): return _ok.okStrings_end(self)
def rbegin(self): return _ok.okStrings_rbegin(self)
def rend(self): return _ok.okStrings_rend(self)
def pop_back(self): return _ok.okStrings_pop_back(self)
def erase(self, *args): return _ok.okStrings_erase(self, *args)
def __init__(self, *args):
this = _ok.new_okStrings(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _ok.okStrings_push_back(self, *args)
def front(self): return _ok.okStrings_front(self)
def back(self): return _ok.okStrings_back(self)
def assign(self, *args): return _ok.okStrings_assign(self, *args)
def resize(self, *args): return _ok.okStrings_resize(self, *args)
def insert(self, *args): return _ok.okStrings_insert(self, *args)
def reserve(self, *args): return _ok.okStrings_reserve(self, *args)
def capacity(self): return _ok.okStrings_capacity(self)
__swig_destroy__ = _ok.delete_okStrings
__del__ = lambda self : None;
okStrings_swigregister = _ok.okStrings_swigregister
okStrings_swigregister(okStrings)
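# okStrings is the SWIG proxy for a C++ string vector and supports the Python sequence
# protocol defined above. Minimal sketch (values are illustrative):
#   names = okStrings()
#   names.append("first")
#   names.append("second")
#   assert len(names) == 2 and names[0] == "first"
#   for n in names:
#       pass  # iteration goes through the SwigPyIterator returned by iterator()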
__version__ = "5.0.1"
class okTRegisterEntries(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, okTRegisterEntries, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, okTRegisterEntries, name)
__repr__ = _swig_repr
def iterator(self): return _ok.okTRegisterEntries_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _ok.okTRegisterEntries___nonzero__(self)
def __bool__(self): return _ok.okTRegisterEntries___bool__(self)
def __len__(self): return _ok.okTRegisterEntries___len__(self)
def pop(self): return _ok.okTRegisterEntries_pop(self)
def __getslice__(self, *args): return _ok.okTRegisterEntries___getslice__(self, *args)
def __setslice__(self, *args): return _ok.okTRegisterEntries___setslice__(self, *args)
def __delslice__(self, *args): return _ok.okTRegisterEntries___delslice__(self, *args)
def __delitem__(self, *args): return _ok.okTRegisterEntries___delitem__(self, *args)
def __getitem__(self, *args): return _ok.okTRegisterEntries___getitem__(self, *args)
def __setitem__(self, *args): return _ok.okTRegisterEntries___setitem__(self, *args)
def append(self, *args): return _ok.okTRegisterEntries_append(self, *args)
def empty(self): return _ok.okTRegisterEntries_empty(self)
def size(self): return _ok.okTRegisterEntries_size(self)
def clear(self): return _ok.okTRegisterEntries_clear(self)
def swap(self, *args): return _ok.okTRegisterEntries_swap(self, *args)
def get_allocator(self): return _ok.okTRegisterEntries_get_allocator(self)
def begin(self): return _ok.okTRegisterEntries_begin(self)
def end(self): return _ok.okTRegisterEntries_end(self)
def rbegin(self): return _ok.okTRegisterEntries_rbegin(self)
def rend(self): return _ok.okTRegisterEntries_rend(self)
def pop_back(self): return _ok.okTRegisterEntries_pop_back(self)
def erase(self, *args): return _ok.okTRegisterEntries_erase(self, *args)
def __init__(self, *args):
this = _ok.new_okTRegisterEntries(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _ok.okTRegisterEntries_push_back(self, *args)
def front(self): return _ok.okTRegisterEntries_front(self)
def back(self): return _ok.okTRegisterEntries_back(self)
def assign(self, *args): return _ok.okTRegisterEntries_assign(self, *args)
def resize(self, *args): return _ok.okTRegisterEntries_resize(self, *args)
def insert(self, *args): return _ok.okTRegisterEntries_insert(self, *args)
def reserve(self, *args): return _ok.okTRegisterEntries_reserve(self, *args)
def capacity(self): return _ok.okTRegisterEntries_capacity(self)
__swig_destroy__ = _ok.delete_okTRegisterEntries
__del__ = lambda self : None;
okTRegisterEntries_swigregister = _ok.okTRegisterEntries_swigregister
okTRegisterEntries_swigregister(okTRegisterEntries)
class okDeviceSensors(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, okDeviceSensors, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, okDeviceSensors, name)
__repr__ = _swig_repr
def iterator(self): return _ok.okDeviceSensors_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _ok.okDeviceSensors___nonzero__(self)
def __bool__(self): return _ok.okDeviceSensors___bool__(self)
def __len__(self): return _ok.okDeviceSensors___len__(self)
def pop(self): return _ok.okDeviceSensors_pop(self)
def __getslice__(self, *args): return _ok.okDeviceSensors___getslice__(self, *args)
def __setslice__(self, *args): return _ok.okDeviceSensors___setslice__(self, *args)
def __delslice__(self, *args): return _ok.okDeviceSensors___delslice__(self, *args)
def __delitem__(self, *args): return _ok.okDeviceSensors___delitem__(self, *args)
def __getitem__(self, *args): return _ok.okDeviceSensors___getitem__(self, *args)
def __setitem__(self, *args): return _ok.okDeviceSensors___setitem__(self, *args)
def append(self, *args): return _ok.okDeviceSensors_append(self, *args)
def empty(self): return _ok.okDeviceSensors_empty(self)
def size(self): return _ok.okDeviceSensors_size(self)
def clear(self): return _ok.okDeviceSensors_clear(self)
def swap(self, *args): return _ok.okDeviceSensors_swap(self, *args)
def get_allocator(self): return _ok.okDeviceSensors_get_allocator(self)
def begin(self): return _ok.okDeviceSensors_begin(self)
def end(self): return _ok.okDeviceSensors_end(self)
def rbegin(self): return _ok.okDeviceSensors_rbegin(self)
def rend(self): return _ok.okDeviceSensors_rend(self)
def pop_back(self): return _ok.okDeviceSensors_pop_back(self)
def erase(self, *args): return _ok.okDeviceSensors_erase(self, *args)
def __init__(self, *args):
this = _ok.new_okDeviceSensors(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _ok.okDeviceSensors_push_back(self, *args)
def front(self): return _ok.okDeviceSensors_front(self)
def back(self): return _ok.okDeviceSensors_back(self)
def assign(self, *args): return _ok.okDeviceSensors_assign(self, *args)
def resize(self, *args): return _ok.okDeviceSensors_resize(self, *args)
def insert(self, *args): return _ok.okDeviceSensors_insert(self, *args)
def reserve(self, *args): return _ok.okDeviceSensors_reserve(self, *args)
def capacity(self): return _ok.okDeviceSensors_capacity(self)
__swig_destroy__ = _ok.delete_okDeviceSensors
__del__ = lambda self : None;
okDeviceSensors_swigregister = _ok.okDeviceSensors_swigregister
okDeviceSensors_swigregister(okDeviceSensors)
class okCPLL22150(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, okCPLL22150, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, okCPLL22150, name)
__repr__ = _swig_repr
ClkSrc_Ref = _ok.okCPLL22150_ClkSrc_Ref
ClkSrc_Div1ByN = _ok.okCPLL22150_ClkSrc_Div1ByN
ClkSrc_Div1By2 = _ok.okCPLL22150_ClkSrc_Div1By2
ClkSrc_Div1By3 = _ok.okCPLL22150_ClkSrc_Div1By3
ClkSrc_Div2ByN = _ok.okCPLL22150_ClkSrc_Div2ByN
ClkSrc_Div2By2 = _ok.okCPLL22150_ClkSrc_Div2By2
ClkSrc_Div2By4 = _ok.okCPLL22150_ClkSrc_Div2By4
DivSrc_Ref = _ok.okCPLL22150_DivSrc_Ref
DivSrc_VCO = _ok.okCPLL22150_DivSrc_VCO
def __init__(self):
this = _ok.new_okCPLL22150()
try: self.this.append(this)
except: self.this = this
def SetCrystalLoad(self, *args): return _ok.okCPLL22150_SetCrystalLoad(self, *args)
def SetReference(self, *args): return _ok.okCPLL22150_SetReference(self, *args)
def GetReference(self): return _ok.okCPLL22150_GetReference(self)
def SetVCOParameters(self, *args): return _ok.okCPLL22150_SetVCOParameters(self, *args)
def GetVCOP(self): return _ok.okCPLL22150_GetVCOP(self)
def GetVCOQ(self): return _ok.okCPLL22150_GetVCOQ(self)
def GetVCOFrequency(self): return _ok.okCPLL22150_GetVCOFrequency(self)
def SetDiv1(self, *args): return _ok.okCPLL22150_SetDiv1(self, *args)
def SetDiv2(self, *args): return _ok.okCPLL22150_SetDiv2(self, *args)
def GetDiv1Source(self): return _ok.okCPLL22150_GetDiv1Source(self)
def GetDiv2Source(self): return _ok.okCPLL22150_GetDiv2Source(self)
def GetDiv1Divider(self): return _ok.okCPLL22150_GetDiv1Divider(self)
def GetDiv2Divider(self): return _ok.okCPLL22150_GetDiv2Divider(self)
def SetOutputSource(self, *args): return _ok.okCPLL22150_SetOutputSource(self, *args)
def SetOutputEnable(self, *args): return _ok.okCPLL22150_SetOutputEnable(self, *args)
def GetOutputSource(self, *args): return _ok.okCPLL22150_GetOutputSource(self, *args)
def GetOutputFrequency(self, *args): return _ok.okCPLL22150_GetOutputFrequency(self, *args)
def IsOutputEnabled(self, *args): return _ok.okCPLL22150_IsOutputEnabled(self, *args)
__swig_destroy__ = _ok.delete_okCPLL22150
__del__ = lambda self : None;
okCPLL22150_swigregister = _ok.okCPLL22150_swigregister
okCPLL22150_swigregister(okCPLL22150)
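# Usage sketch for the PLL wrapper above (all numeric values are illustrative and not
# validated against real hardware):
#   pll = okCPLL22150()
#   pll.SetVCOParameters(400, 48)                        # P and Q dividers
#   pll.SetDiv1(okCPLL22150.DivSrc_VCO, 4)
#   pll.SetOutputSource(0, okCPLL22150.ClkSrc_Div1ByN)
#   pll.SetOutputEnable(0, True)
#   freq = pll.GetOutputFrequency(0)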
class okCPLL22393(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, okCPLL22393, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, okCPLL22393, name)
__repr__ = _swig_repr
ClkSrc_Ref = _ok.okCPLL22393_ClkSrc_Ref
ClkSrc_PLL0_0 = _ok.okCPLL22393_ClkSrc_PLL0_0
ClkSrc_PLL0_180 = _ok.okCPLL22393_ClkSrc_PLL0_180
ClkSrc_PLL1_0 = _ok.okCPLL22393_ClkSrc_PLL1_0
ClkSrc_PLL1_180 = _ok.okCPLL22393_ClkSrc_PLL1_180
ClkSrc_PLL2_0 = _ok.okCPLL22393_ClkSrc_PLL2_0
ClkSrc_PLL2_180 = _ok.okCPLL22393_ClkSrc_PLL2_180
def __init__(self):
this = _ok.new_okCPLL22393()
try: self.this.append(this)
except: self.this = this
def SetCrystalLoad(self, *args): return _ok.okCPLL22393_SetCrystalLoad(self, *args)
def SetReference(self, *args): return _ok.okCPLL22393_SetReference(self, *args)
def SetPLLParameters(self, *args): return _ok.okCPLL22393_SetPLLParameters(self, *args)
def SetPLLLF(self, *args): return _ok.okCPLL22393_SetPLLLF(self, *args)
def SetOutputDivider(self, *args): return _ok.okCPLL22393_SetOutputDivider(self, *args)
def SetOutputSource(self, *args): return _ok.okCPLL22393_SetOutputSource(self, *args)
def SetOutputEnable(self, *args): return _ok.okCPLL22393_SetOutputEnable(self, *args)
# File: research/syntaxnet/dragnn/python/component.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds a DRAGNN graph for local training."""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from dragnn.protos import export_pb2
from dragnn.python import dragnn_ops
from dragnn.python import network_units
from dragnn.python import runtime_support
from syntaxnet.util import check
from syntaxnet.util import registry
from tensorflow.python.platform import tf_logging as logging
def build_softmax_cross_entropy_loss(logits, gold):
"""Builds softmax cross entropy loss."""
# A gold label > -1 indicates that the sentence is still
# in a valid state. Otherwise, the sentence has ended.
#
# We add only the valid sentences to the loss, in the following way:
# 1. We compute 'valid_ix', the indices in gold that contain
# valid oracle actions.
# 2. We compute the cost function by comparing logits and gold
# only for the valid indices.
valid = tf.greater(gold, -1)
valid_ix = tf.reshape(tf.where(valid), [-1])
valid_gold = tf.gather(gold, valid_ix)
valid_logits = tf.gather(logits, valid_ix)
cost = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.cast(valid_gold, tf.int64),
logits=valid_logits,
name='sparse_softmax_cross_entropy_with_logits'))
correct = tf.reduce_sum(
tf.to_int32(tf.nn.in_top_k(valid_logits, valid_gold, 1)))
total = tf.size(valid_gold)
return cost, correct, total, valid_logits, valid_gold
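# Illustration of the masking above (values are made up): with gold = [2, -1, 0],
# valid = [True, False, True], valid_ix = [0, 2], and valid_gold = [2, 0], so only
# logits rows 0 and 2 contribute to the cross-entropy sum.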
def build_sigmoid_cross_entropy_loss(logits, gold, indices, probs):
"""Builds sigmoid cross entropy loss."""
# Filter out entries where gold <= -1, which are batch padding entries.
valid = tf.greater(gold, -1)
valid_ix = tf.reshape(tf.where(valid), [-1])
valid_gold = tf.gather(gold, valid_ix)
valid_indices = tf.gather(indices, valid_ix)
valid_probs = tf.gather(probs, valid_ix)
# NB: tf.gather_nd() raises an error on CPU for out-of-bounds indices. That's
# why we need to filter out the gold=-1 batch padding above.
valid_pairs = tf.stack([valid_indices, valid_gold], axis=1)
valid_logits = tf.gather_nd(logits, valid_pairs)
cost = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=valid_probs,
logits=valid_logits,
name='sigmoid_cross_entropy_with_logits'))
gold_bool = valid_probs > 0.5
predicted_bool = valid_logits > 0.0
total = tf.size(gold_bool)
with tf.control_dependencies([
tf.assert_equal(
total, tf.size(predicted_bool), name='num_predicted_gold_mismatch')
]):
agreement_bool = tf.logical_not(tf.logical_xor(gold_bool, predicted_bool))
correct = tf.reduce_sum(tf.to_int32(agreement_bool))
cost.set_shape([])
correct.set_shape([])
total.set_shape([])
return cost, correct, total, gold
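# Note: tf.stack([valid_indices, valid_gold], axis=1) builds (row, column) pairs, so
# tf.gather_nd selects logits[valid_indices[i], valid_gold[i]] for each surviving entry,
# i.e. one logit per (batch element, gold class) pair.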
class NetworkState(object):
"""Simple utility to manage the state of a DRAGNN network.
This class encapsulates the variables that are specific to any
particular instance of a DRAGNN stack, as constructed by the
MasterBuilder below.
Attributes:
activations: Dictionary mapping layer names to StoredActivation objects.
"""
def __init__(self):
self.activations = {}
class MasterState(object):
"""Simple utility to encapsulate tensors associated with the master state.
Attributes:
handle: string tensor handle to the underlying ComputeSession.
current_batch_size: int tensor containing the current batch size.
"""
def __init__(self, handle, current_batch_size):
self.handle = handle
self.current_batch_size = current_batch_size
@registry.RegisteredClass
class ComponentBuilderBase(object):
"""Utility to build a single Component in a DRAGNN stack of models.
This class handles converting a ComponentSpec proto into various TF
sub-graphs. It will stitch together various neural units with dynamic
unrolling inside a tf.while loop.
All variables for parameters are created during the constructor within the
scope of the component's name, e.g. 'tagger/embedding_matrix_0' for a
component named 'tagger'.
As part of the specification, ComponentBuilder will wrap an underlying
NetworkUnit which generates the actual network layout.
Attributes:
master: dragnn.MasterBuilder that owns this component.
num_actions: Number of actions in the transition system.
name: Name of this component.
spec: dragnn.ComponentSpec that configures this component.
moving_average: True if moving-average parameters should be used.
"""
__metaclass__ = ABCMeta # required for @abstractmethod
def __init__(self, master, component_spec, attr_defaults=None):
"""Initializes the ComponentBuilder from specifications.
Args:
master: dragnn.MasterBuilder object.
component_spec: dragnn.ComponentSpec proto to be built.
attr_defaults: Optional dict of component attribute defaults. If not
provided or if empty, attributes are not extracted.
"""
self.master = master
self.num_actions = component_spec.num_actions
self.name = component_spec.name
self.spec = component_spec
self.moving_average = None
# Determine if this component should apply self-normalization.
self.eligible_for_self_norm = (
not self.master.hyperparams.self_norm_components_filter or self.name in
self.master.hyperparams.self_norm_components_filter.split(','))
# Extract component attributes before make_network(), so the network unit
# can access them.
self._attrs = {}
global_attr_defaults = {
'locally_normalize': False,
'output_as_probabilities': False
}
if attr_defaults:
global_attr_defaults.update(attr_defaults)
self._attrs = network_units.get_attrs_with_defaults(
self.spec.component_builder.parameters, global_attr_defaults)
do_local_norm = self._attrs['locally_normalize']
self._output_as_probabilities = self._attrs['output_as_probabilities']
with tf.variable_scope(self.name):
self.training_beam_size = tf.constant(
self.spec.training_beam_size, name='TrainingBeamSize')
self.inference_beam_size = tf.constant(
self.spec.inference_beam_size, name='InferenceBeamSize')
self.locally_normalize = tf.constant(
do_local_norm, name='LocallyNormalize')
self._step = tf.get_variable(
'step', [], initializer=tf.zeros_initializer(), dtype=tf.int32)
self._total = tf.get_variable(
'total', [], initializer=tf.zeros_initializer(), dtype=tf.int32)
# Construct network variables.
self.network = self.make_network(self.spec.network_unit)
# Construct moving average.
if self.master.hyperparams.use_moving_average:
self.moving_average = tf.train.ExponentialMovingAverage(
decay=self.master.hyperparams.average_weight, num_updates=self._step)
self.avg_ops = [self.moving_average.apply(self.network.params)]
# Used to export the cell; see add_cell_input() and add_cell_output().
self._cell_subgraph_spec = export_pb2.CellSubgraphSpec()
def make_network(self, network_unit):
"""Makes a NetworkUnitInterface object based on the network_unit spec.
Components may override this method to exert control over the
network unit construction, such as which network units are supported.
Args:
network_unit: RegisteredModuleSpec proto defining the network unit.
Returns:
An implementation of NetworkUnitInterface.
Raises:
ValueError: if the requested network unit is not found in the registry.
"""
network_type = network_unit.registered_name
with tf.variable_scope(self.name):
# Raises ValueError if not found.
return network_units.NetworkUnitInterface.Create(network_type, self)
@abstractmethod
def build_greedy_training(self, state, network_states):
"""Builds a training graph for this component.
Two assumptions are made about the resulting graph:
1. An oracle will be used to unroll the state and compute the cost.
2. The graph will be differentiable when the cost is being minimized.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects.
Returns:
(state, cost, correct, total) -- These are TF ops corresponding to
the final state after unrolling, the total cost, the total number of
correctly predicted actions, and the total number of actions.
"""
pass
def build_structured_training(self, state, network_states):
"""Builds a beam search based training loop for this component.
The default implementation builds a dummy graph and raises a
TensorFlow runtime exception to indicate that structured training
is not implemented.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects.
Returns:
(handle, cost, correct, total) -- These are TF ops corresponding
to the final handle after unrolling, the total cost, and the
total number of actions. Since the number of correctly predicted
actions is not applicable in the structured training setting, a
dummy value should be returned.
"""
del network_states # Unused.
with tf.control_dependencies([tf.Assert(False, ['Not implemented.'])]):
handle = tf.identity(state.handle)
cost = tf.constant(0.)
correct, total = tf.constant(0), tf.constant(0)
return handle, cost, correct, total
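# Design note: wrapping these ops in a control dependency on tf.Assert(False, ...) lets
# graph construction succeed, while any attempt to actually run structured training fails
# at session run time with the 'Not implemented.' assertion.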
@abstractmethod
def build_greedy_inference(self, state, network_states,
during_training=False):
"""Builds an inference graph for this component.
If this graph is being constructed 'during_training', then it needs to be
differentiable even though it doesn't return an explicit cost.
There may be other cases where the distinction between training and eval is
important. The handling of dropout is an example of this.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects.
during_training: whether the graph is being constructed during training
Returns:
Handle to the state once inference is complete for this Component.
"""
pass
def get_summaries(self):
"""Constructs a set of summaries for this component.
Returns:
List of Summary ops to get parameter norms, progress reports, and
so forth for this component.
"""
def combine_norm(matrices):
# Handles None in cases where the optimizer or moving average slot is
# not present.
squares = [tf.reduce_sum(tf.square(m)) for m in matrices if m is not None]
# Some components may not have any parameters, in which case we simply
# return zero.
if squares:
return tf.sqrt(tf.add_n(squares))
else:
return tf.constant(0, tf.float32)
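# combine_norm() computes the global L2 norm over all parameter matrices,
# i.e. sqrt(sum_i ||M_i||_F^2), skipping None entries and returning 0 when there
# are no parameters at all.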
summaries = []
summaries.append(tf.summary.scalar('%s step' % self.name, self._step))
summaries.append(tf.summary.scalar('%s total' % self.name, self._total))
if self.network.params:
summaries.append(
tf.summary.scalar('%s parameter Norm' % self.name,
combine_norm(self.network.params)))
slot_names = self.master.optimizer.get_slot_names()
for name in slot_names:
slot_params = [
self.master.optimizer.get_slot(p, name) for p in self.network.params
]
summaries.append(
tf.summary.scalar('%s %s Norm' % (self.name, name),
combine_norm(slot_params)))
# Construct moving average.
if self.master.hyperparams.use_moving_average:
summaries.append(
tf.summary.scalar('%s
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
cleanup_input, # type: "_models.TestFailoverCleanupInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_failover_cleanup_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cleanup_input, 'TestFailoverCleanupInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_failover_cleanup_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailoverCleanup'} # type: ignore
def begin_test_failover_cleanup(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
cleanup_input, # type: "_models.TestFailoverCleanupInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute test failover cleanup.
Operation to clean up the test failover of a replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param cleanup_input: Test failover cleanup input.
:type cleanup_input: ~azure.mgmt.recoveryservicessiterecovery.models.TestFailoverCleanupInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_failover_cleanup_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
cleanup_input=cleanup_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test_failover_cleanup.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailoverCleanup'} # type: ignore
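# Call sketch (not taken from the SDK docs; the operations-group and variable names are
# assumptions): given an authenticated SiteRecoveryManagementClient `client`,
#   poller = client.replication_protected_items.begin_test_failover_cleanup(
#       fabric_name="fabric1",
#       protection_container_name="container1",
#       replicated_protected_item_name="item1",
#       cleanup_input=cleanup_input,
#   )
#   item = poller.result()   # blocks until the long-running operation completes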
def _unplanned_failover_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
failover_input, # type: "_models.UnplannedFailoverInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._unplanned_failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(failover_input, 'UnplannedFailoverInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_unplanned_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/unplannedFailover'} # type: ignore
def begin_unplanned_failover(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
failover_input, # type: "_models.UnplannedFailoverInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute unplanned failover.
Operation to initiate a failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param failover_input: Failover input.
:type failover_input: ~azure.mgmt.recoveryservicessiterecovery.models.UnplannedFailoverInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._unplanned_failover_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
failover_input=failover_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_unplanned_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/unplannedFailover'} # type: ignore
def _update_appliance_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
appliance_update_input, # type: "_models.UpdateApplianceForReplicationProtectedItemInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_appliance_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(appliance_update_input, 'UpdateApplianceForReplicationProtectedItemInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_appliance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/updateAppliance'} # type: ignore
def begin_update_appliance(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
appliance_update_input, # type: "_models.UpdateApplianceForReplicationProtectedItemInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Updates appliance for replication protected Item.
The operation to update appliance of an ASR replication protected item.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
'''ResNet in PyTorch.'''
import os
import argparse
import shutil
import time
import json
import math
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.nn import Parameter
from torch.autograd import Variable
#from models import *
import scipy.io as sio
import numpy as np
# import building_blocks as bb
from models import *
from models_resnet import *
from init import LSUVinit
import gpustat
parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
parser.add_argument('--data-folder', '-d',
help='path to dataset')
parser.add_argument('--arch', default='None', type=str,
help='Specify model architecture')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr-decay', default=0.1, type=float,
metavar='LRD', help='lr decay (default: 0.1)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://172.16.31.10:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--plot-filename', default='plot', type=str,
help='Specify the filename of plot')
parser.add_argument('--ckpt-filename', default='checkpoint.pth.tar', type=str,
help='Specify the filename of checkpoint')
parser.add_argument('--fp16', dest='fp16', action='store_true',
help='use half-precision FP16')
parser.add_argument('--debug', dest='debug', action='store_true',
help='print shape of tensors')
parser.add_argument('--keep-prob', default=None, type=float, metavar='KEEP',
help='the ratio of kept (active) neurons')
parser.add_argument('--topk', dest='topk', action='store_true',
help='turn on top-k mask on CONV layer')
parser.add_argument('--sign-mask', dest='sign_mask', action='store_true',
help='turn on sign mask on CONV layer')
parser.add_argument('--random-proj', dest='rp', action='store_true',
help='turn on Random Projection based masking on CONV layer')
parser.add_argument('--turnoff-rp', dest='rp_off', action='store_true',
help='turn OFF Random Projection based masking on CONV layer')
parser.add_argument('--proj-update-freq', default=100, type=int,
help='Random Projection update frequency (default 100)')
parser.add_argument('--eps-jl', default=0.5, type=float,
metavar='EJL', help='epsilon_JL (default: 0.5)')
parser.add_argument('--no-bn', dest='bn', action='store_false',
help='turn off Batch Normalization')
parser.add_argument('--width', default=1, type=int,
help='Wide ResNet width (default 1)')
parser.add_argument('--num-class', default=10, type=int,
help='By default use CIFAR10')
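# Example invocation (sketch; the script name, data path and hyper-parameter
# values below are purely illustrative):
#   python cifar_train.py --data-folder ./data --arch resnet20 --random-proj --keep-prob 0.5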
def mem_usage(device=1):
gpu_stats = gpustat.GPUStatCollection.new_query()
item = gpu_stats.jsonify()['gpus'][device]
print('{}/{}, utilization {}'.format(item['memory.used'], item['memory.total'], item['utilization.gpu']))
def main():
# Parse arguments
global args, best_acc, use_cuda, dtype
args = parser.parse_args()
best_acc = 0
data_dir = args.data_folder
use_cuda = torch.cuda.is_available()
dtype = torch.FloatTensor
torch.manual_seed(args.seed)
# Instantiate model
if args.rp or args.topk:
if not args.keep_prob:
print('Please specify keep_prob when using random projection or top-k')
return
print('Loading model')
if args.arch == 'vgg8':
if args.topk:
model = VGG8TopK(num_class=args.num_class, use_bn=args.bn, keep_prob=args.keep_prob)
print('model using VGG8TopK')
elif args.rp:
model = VGG8RP(num_class=args.num_class, use_bn=args.bn, keep_prob=args.keep_prob,
eps_jl=args.eps_jl)
print('model using VGG8 with sparse random projection')
elif args.sign_mask:
model = VGG8Sign(num_class=args.num_class, use_bn=args.bn, keep_prob=args.keep_prob)
print('model using sign mask')
else:
model = VGG8(num_class=args.num_class, use_bn=args.bn, keep_prob=args.keep_prob)
print('model using basic VGG8')
elif args.arch == 'resnet8':
model = resnet8(num_class=args.num_class, use_bn=args.bn, use_rp=args.rp, eps_jl=args.eps_jl,
keep_prob=args.keep_prob, width=args.width)
print('model using basic ResNet8')
if args.keep_prob and not args.rp:
print('Passing keep_prob w/o random-proj assumes using topk')
elif args.arch == 'resnet20':
model = resnet20(use_bn=args.bn, use_rp=args.rp, eps_jl=args.eps_jl,
keep_prob=args.keep_prob)
print('model using ResNet-20')
if args.keep_prob and not args.rp:
print('Passing keep_prob w/o random-proj assumes using topk')
else:
        print('Please specify a model architecture by passing --arch')
return
if not args.bn:
print('model not using Batch Normalization')
    print('Finished loading model')
# Setup model and criterion if using GPU
if use_cuda:
dtype = torch.cuda.FloatTensor
torch.cuda.manual_seed(args.seed)
model.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().cuda()
print('Using CUDA')
else:
criterion = nn.CrossEntropyLoss()
print('No CUDA involved')
# if args.rp:
# model.setup_rp()
# Setup optimizer
# optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
optimizer = optim.SGD(model.parameters(), lr=args.lr,
weight_decay=args.weight_decay, momentum=args.momentum)
# Data preprocessing
    # torchvision datasets yield PIL images with values in [0, 1].
    # We convert them to tensors and standardize each channel with the CIFAR mean/std.
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
])
if args.num_class == 100:
trainset = dset.CIFAR100(root=data_dir, train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers)
testset = dset.CIFAR100(root=data_dir, train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size,
shuffle=False, num_workers=args.workers)
else:
trainset = dset.CIFAR10(root=data_dir, train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers)
testset = dset.CIFAR10(root=data_dir, train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size,
shuffle=False, num_workers=args.workers)
# Training/inference steps
lr = args.lr
lr_decay = args.lr_decay
best_loss = 100000.0
best_loss_epoch = 0
lr_update_epoch = 0
loss_plt = {'train': [], 'eval': []}
acc_plt = {'train': [], 'eval': []}
act_prob_plt = {
'conv1': [],
'conv2': [],
'conv3': [],
'conv4': [],
'conv5': [],
'conv6': []
}
    # Random projection / sign-mask setup, followed by LSUV initialization
if args.rp:
model.init_rp()
if use_cuda:
model.cuda()
model.setup_rp()
if args.rp and args.rp_off:
model.turnoff_rp()
if args.sign_mask:
model.setup_rp()
if not (args.evaluate or args.resume):
end = time.time()
for i, (inputs, targets) in enumerate(train_loader):
# measure LSUV initialization time
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs_var, targets_var = Variable(inputs), Variable(targets)
model = LSUVinit(model, inputs_var, cuda=use_cuda)
if i > 0: break
print('LSUVinit time: ', time.time() - end)
# Optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
evaluate(test_loader, model, criterion)
else:
for epoch in range(args.start_epoch, args.epochs):
# if args.distributed:
# train_sampler.set_epoch(epoch)
lr = adjust_learning_rate(optimizer, lr, epoch + 1)
# train for one epoch
train_loss, train_acc, train_layer_act_prob = train(train_loader, model,
criterion, optimizer, lr, epoch)
# evaluate on validation set
eval_loss, eval_acc = evaluate(test_loader, model, criterion)
if eval_loss < best_loss:
best_loss = eval_loss
best_loss_epoch = epoch
# if epoch > best_loss_epoch + 10:
# if epoch > lr_update_epoch + 10:
# lr = lr * lr_decay
# lr_update_epoch = epoch
# remember best eval_acc and save checkpoint
is_best = eval_acc > best_acc
best_acc = max(eval_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
# 'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch, filename=args.ckpt_filename)
loss_plt['train'].append(train_loss)
loss_plt['eval'].append(eval_loss)
acc_plt['train'].append(train_acc)
acc_plt['eval'].append(eval_acc)
# for l in range(1, 7):
# act_prob_plt['conv%d' % l].append(format(train_layer_act_prob['conv%d' % l].avg, '.2f'))
f = open(args.plot_filename, 'w')
json.dump({'train loss': loss_plt['train'],
'eval loss': loss_plt['eval'],
'train accuracy': acc_plt['train'],
'eval accuracy': acc_plt['eval'],
'Training active_prob: ': act_prob_plt
}, f)
f.close()
# Training
def train(train_loader, model, criterion, optimizer, lr, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
layer_act_prob = {
'conv1': AverageMeter(),
'conv2': AverageMeter(),
'conv3': AverageMeter(),
'conv4': AverageMeter(),
'conv5': AverageMeter(),
'conv6': AverageMeter()
}
model.train()
end = time.time()
for i, (inputs, targets) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
if args.fp16:
inputs = inputs.cuda().half()
if (args.rp or args.sign_mask) and i % args.proj_update_freq == 0:
model.setup_rp()
# print('Re-projecting weights')
inputs_var, targets_var = Variable(inputs), Variable(targets)
# compute output
outputs, active_prob = model(inputs_var)
loss = criterion(outputs, targets_var)
# measure accuracy and record loss
m_acc = accuracy(outputs.data, targets)
losses.update(loss.data[0], inputs.size(0))
acc.update(m_acc, inputs.size(0))
# for l in range(1, 7):
# layer_act_prob['conv%d' % l].update(active_prob[l-1], inputs.size(0))
# compute gradients and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure computation time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}] '
'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss {loss.val:.4f} ({loss.avg:.4f}) '
'Acc {acc.val:.3f} ({acc.avg:.3f}) '
'lr {lr} '.format(
epoch, i, len(train_loader), batch_time=batch_time,
loss=losses, acc=acc, lr=lr))
return losses.avg, acc.avg, layer_act_prob
def accuracy(outputs, targets):
_, predicted = torch.max(outputs, 1)
batch_size = targets.size(0)
correct = predicted.eq(targets).float().sum()
return correct * (100.0 / batch_size)
def calc_error_rate(outputs, targets):
return 1 - accuracy(outputs, targets)
def evaluate(data_loader, model, criterion):
# global best_acc
batch_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to evaluate mode
model.eval()
if args.rp:
model.setup_rp()
end = time.time()
for i, (inputs, targets) in enumerate(data_loader):
if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
if args.fp16:
inputs = inputs.cuda().half()
inputs_var = torch.autograd.Variable(inputs, volatile=True)
targets_var = torch.autograd.Variable(targets, volatile=True)
if i == 10: model.saving_mask()
# compute output and loss
outputs, _ = model(inputs_var)
m_loss = criterion(outputs, targets_var)
# measure acc and record loss
m_acc = accuracy(outputs.data, targets)
# m_error_rate = calc_error_rate(outputs.data, targets)
losses.update(m_loss.data[0], inputs.size(0))
acc.update(m_acc, inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss: {loss.val:.4f} ({loss.avg:.4f})\t'
'Accuracy: {acc.val:.3f} ({acc.avg:.3f})\t'.format(
i, len(data_loader), batch_time=batch_time, loss=losses,
                      acc=acc))
    return losses.avg, acc.avg
and word[3] != "o" and word[4] != "O" and word[4] != "o" and word[5] != "O" and word[5] != "o" and word[6] != "O" and word[6] != "o" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "o" + ", "
if guessChar == "P" or guessChar == "p" :
if word[1] == "P" or word[1] == "p" :
toGuess = toGuess[:1] + "p" + toGuess[2:]
if word[2] == "P" or word[2] == "p" :
toGuess = toGuess[:2] + "p" + toGuess[3:]
if word[3] == "P" or word[3] == "p" :
toGuess = toGuess[:3] + "p" + toGuess[4:]
if word[4] == "P" or word[4] == "p" :
toGuess = toGuess[:4] + "p" + toGuess[5:]
if word[5] == "P" or word[5] == "p" :
toGuess = toGuess[:5] + "p" + toGuess[6:]
if word[6] == "P" or word[6] == "p" :
toGuess = toGuess[:6] + "p" + toGuess[7:]
if word[1] != "P" and word[1] != "p" and word[2] != "P" and word[2] != "p" and word[3] != "P" and word[3] != "p" and word[4] != "P" and word[4] != "p" and word[5] != "P" and word[5] != "p" and word[6] != "P" and word[6] != "p" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "p" + ", "
if guessChar == "Q" or guessChar == "q" :
if word[1] == "Q" or word[1] == "q" :
toGuess = toGuess[:1] + "q" + toGuess[2:]
if word[2] == "Q" or word[2] == "q" :
toGuess = toGuess[:2] + "q" + toGuess[3:]
if word[3] == "Q" or word[3] == "q" :
toGuess = toGuess[:3] + "q" + toGuess[4:]
if word[4] == "Q" or word[4] == "q" :
toGuess = toGuess[:4] + "q" + toGuess[5:]
if word[5] == "Q" or word[5] == "q" :
toGuess = toGuess[:5] + "q" + toGuess[6:]
if word[6] == "Q" or word[6] == "q" :
toGuess = toGuess[:6] + "q" + toGuess[7:]
if word[1] != "Q" and word[1] != "q" and word[2] != "Q" and word[2] != "q" and word[3] != "Q" and word[3] != "q" and word[4] != "Q" and word[4] != "q" and word[5] != "Q" and word[5] != "q" and word[6] != "Q" and word[6] != "q" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "q" + ", "
if guessChar == "R" or guessChar == "r" :
if word[1] == "R" or word[1] == "r" :
toGuess = toGuess[:1] + "r" + toGuess[2:]
if word[2] == "R" or word[2] == "r" :
toGuess = toGuess[:2] + "r" + toGuess[3:]
if word[3] == "R" or word[3] == "r" :
toGuess = toGuess[:3] + "r" + toGuess[4:]
if word[4] == "R" or word[4] == "r" :
toGuess = toGuess[:4] + "r" + toGuess[5:]
if word[5] == "R" or word[5] == "r" :
toGuess = toGuess[:5] + "r" + toGuess[6:]
if word[6] == "R" or word[6] == "r" :
toGuess = toGuess[:6] + "r" + toGuess[7:]
if word[1] != "R" and word[1] != "r" and word[2] != "R" and word[2] != "r" and word[3] != "R" and word[3] != "r" and word[4] != "R" and word[4] != "r" and word[5] != "R" and word[5] != "r" and word[6] != "R" and word[6] != "r" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "r" + ", "
if guessChar == "S" or guessChar == "s" :
if word[1] == "S" or word[1] == "s" :
toGuess = toGuess[:1] + "s" + toGuess[2:]
if word[2] == "S" or word[2] == "s" :
toGuess = toGuess[:2] + "s" + toGuess[3:]
if word[3] == "S" or word[3] == "s" :
toGuess = toGuess[:3] + "s" + toGuess[4:]
if word[4] == "S" or word[4] == "s" :
toGuess = toGuess[:4] + "s" + toGuess[5:]
if word[5] == "S" or word[5] == "s" :
toGuess = toGuess[:5] + "s" + toGuess[6:]
if word[6] == "S" or word[6] == "s" :
toGuess = toGuess[:6] + "s" + toGuess[7:]
if word[1] != "S" and word[1] != "s" and word[2] != "S" and word[2] != "s" and word[3] != "S" and word[3] != "s" and word[4] != "S" and word[4] != "s" and word[5] != "S" and word[5] != "s" and word[6] != "S" and word[6] != "s" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "s" + ", "
if guessChar == "T" or guessChar == "t" :
if word[1] == "T" or word[1] == "t" :
toGuess = toGuess[:1] + "t" + toGuess[2:]
if word[2] == "T" or word[2] == "t" :
toGuess = toGuess[:2] + "t" + toGuess[3:]
if word[3] == "T" or word[3] == "t" :
toGuess = toGuess[:3] + "t" + toGuess[4:]
if word[4] == "T" or word[4] == "t" :
toGuess = toGuess[:4] + "t" + toGuess[5:]
if word[5] == "T" or word[5] == "t" :
toGuess = toGuess[:5] + "t" + toGuess[6:]
if word[6] == "T" or word[6] == "t" :
toGuess = toGuess[:6] + "t" + toGuess[7:]
if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" and word[3] != "T" and word[3] != "t" and word[4] != "T" and word[4] != "t" and word[5] != "T" and word[5] != "t" and word[6] != "T" and word[6] != "t" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "t" + ", "
if guessChar == "U" or guessChar == "u" :
if word[1] == "U" or word[1] == "u" :
toGuess = toGuess[:1] + "u" + toGuess[2:]
if word[2] == "U" or word[2] == "u" :
toGuess = toGuess[:2] + "u" + toGuess[3:]
if word[3] == "U" or word[3] == "u" :
toGuess = toGuess[:3] + "u" + toGuess[4:]
if word[4] == "U" or word[4] == "u" :
toGuess = toGuess[:4] + "u" + toGuess[5:]
if word[5] == "U" or word[5] == "u" :
toGuess = toGuess[:5] + "u" + toGuess[6:]
if word[6] == "U" or word[6] == "u" :
toGuess = toGuess[:6] + "u" + toGuess[7:]
if word[1] != "U" and word[1] != "u" and word[2] != "U" and word[2] != "u" and word[3] != "U" and word[3] != "u" and word[4] != "U" and word[4] != "u" and word[5] != "U" and word[5] != "u" and word[6] != "U" and word[6] != "u" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "u" + ", "
if guessChar == "V" or guessChar == "v" :
if word[1] == "V" or word[1] == "v" :
toGuess = toGuess[:1] + "v" + toGuess[2:]
if word[2] == "V" or word[2] == "v" :
toGuess = toGuess[:2] + "v" + toGuess[3:]
if word[3] == "V" or word[3] == "v" :
toGuess = toGuess[:3] + "v" + toGuess[4:]
if word[4] == "V" or word[4] == "v" :
toGuess = toGuess[:4] + "v" + toGuess[5:]
if word[5] == "V" or word[5] == "v" :
toGuess = toGuess[:5] + "v" + toGuess[6:]
if word[6] == "V" or word[6] == "v" :
toGuess = toGuess[:6] + "v" + toGuess[7:]
if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" and word[3] != "V" and word[3] != "v" and word[4] != "V" and word[4] != "v" and word[5] != "V" and word[5] != "v" and word[6] != "V" and word[6] != "v" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "v" + ", "
if guessChar == "W" or guessChar == "w" :
if word[1] == "W" or word[1] == "w" :
toGuess = toGuess[:1] + "w" + toGuess[2:]
if word[2] == "W" or word[2] == "w" :
toGuess = toGuess[:2] + "w" + toGuess[3:]
if word[3] == "W" or word[3] == "w" :
toGuess = toGuess[:3] + "w" + toGuess[4:]
if word[4] == "W" or word[4] == "w" :
toGuess = toGuess[:4] + "w" + toGuess[5:]
if word[5] == "W" or word[5] == "w" :
toGuess = toGuess[:5] + "w" + toGuess[6:]
if word[6] == "W" or word[6] == "w" :
toGuess = toGuess[:6] + "w" + toGuess[7:]
if word[1] != "W" and word[1] != "w" and word[2] != "W" and word[2] != "w" and word[3] != "W" and word[3] != "w" and word[4] != "W" and word[4] != "w" and word[5] != "W" and word[5] != "w" and word[6] != "W" and word[6] != "w" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "w" + ", "
if guessChar == "X" or guessChar == "x" :
if word[1] == "X" or word[1] == "x" :
toGuess = toGuess[:1] + "x" + toGuess[2:]
if word[2] == "X" or word[2] == "x" :
                toGuess = toGuess[:2] + "x" + toGuess[3:]
full_delex:
if value == 'yes':
value = 'family friendly'
else:
value = 'not family friendly'
elif value == "yes" or value == "no":
value = attribute + "_" + value
if forTrain and singlePredicate not in self.attributes:
self.attributes[singlePredicate] = set()
if attribute:
if forTrain:
self.attributes[singlePredicate].add(attribute)
attributeValues[attribute] = value
            for attribute in ['area', 'food', 'name', 'near', 'eattype', 'pricerange', 'customer_rating', 'familyfriendly']:
delexValue = Action.TOKEN_X + attribute + "_0"
if attribute in attributeValues and delexValue not in delexicalizedMap:
value = attributeValues[attribute]
if attribute == 'name' or attribute == 'near' or full_delex:
v = re.sub("([.,?:;!'-])", " \g<1> ", value)
v = v.replace("\\?", " \\? ").replace("\\.", " \\.").replace(",", " , ").replace(" ", " ").strip().lower()
if " " + v + " " in " " + refPart + " ":
delexicalizedMap[delexValue] = v
value = delexValue
elif full_delex:
temp_value = full_delexicalize_E2E(delexicalizedMap, attribute, v, attributeValues, refPart)
if temp_value:
delexicalizedMap[delexValue] = temp_value
value = delexValue
if delexValue in delexicalizedMap:
if (" " + delexicalizedMap[delexValue].lower() + " ") in (" " + refPart + " "):
refPart = (" " + refPart + " ").replace((" " + delexicalizedMap[delexValue].lower() + " "),
(" " + delexValue + " ")).strip()
attributeValues[attribute] = value
'''
for attribute in ['area', 'food', 'name', 'near', 'eattype', 'pricerange', 'customer_rating', 'familyfriendly']:
delexValue = Action.TOKEN_X + attribute + "_0"
if attribute in attributeValues and delexValue not in delexicalizedMap:
if attribute == 'familyfriendly':
err += 1
print(attribute)
print(attributeValues[attribute])
print(refPart)
print('-----------------------------------')
'''
observedWordSequence = []
refPart = refPart.replace(", ,", " , ").replace(". .", " . ").replace('"', ' " ').replace(" ", " ").strip()
refPart = " ".join(refPart.split())
if refPart:
words = refPart.split(" ")
for word in words:
word = word.strip()
if word:
if "0f" in word:
word = word.replace("0f", "of")
m = re.search("^@x@([a-z]+)_([0-9]+)", word)
if m and m.group(0) != word:
var = m.group(0)
realValue = delexicalizedMap.get(var)
realValue = word.replace(var, realValue)
delexicalizedMap[var] = realValue
observedWordSequence.append(var.strip())
else:
m = re.match("([0-9]+)([a-z]+)", word)
                            if m and m.group(2).strip() == "o":
                                observedWordSequence.append(m.group(1).strip() + "0")
elif m:
observedWordSequence.append(m.group(1).strip())
observedWordSequence.append(m.group(2).strip())
else:
m = re.match("([a-z]+)([0-9]+)", word)
if m and (m.group(1).strip() == "l" or m.group(1).strip() == "e"):
observedWordSequence.append("£" + m.group(2).strip())
elif m:
observedWordSequence.append(m.group(1).strip())
observedWordSequence.append(m.group(2).strip())
else:
m = re.match("(£)([a-z]+)", word)
if m:
observedWordSequence.append(m.group(1).strip())
observedWordSequence.append(m.group(2).strip())
else:
m = re.match("([a-z]+)(£[0-9]+)", word)
if m:
observedWordSequence.append(m.group(1).strip())
observedWordSequence.append(m.group(2).strip())
else:
m = re.match("([0-9]+)([a-z]+)([0-9]+)", word)
if m:
observedWordSequence.append(m.group(1).strip())
observedWordSequence.append(m.group(2).strip())
observedWordSequence.append(m.group(3).strip())
else:
m = re.match("([0-9]+)(@x@[a-z]+_0)", word)
if m:
observedWordSequence.append(m.group(1).strip())
observedWordSequence.append(m.group(2).strip())
else:
m = re.match("(£[0-9]+)([a-z]+)", word)
if m and m.group(2).strip() == "o":
observedWordSequence.append(m.group(1).strip() + "0")
else:
observedWordSequence.append(word.strip())
MR = MeaningRepresentation(singlePredicate, attributeValues, MRPart, delexicalizedMap)
# We store the maximum observed word sequence length, to use as a limit during generation
if forTrain and len(observedWordSequence) > self.maxWordSequenceLength:
self.maxWordSequenceLength = len(observedWordSequence)
# We initialize the alignments between words and attribute/value pairs
wordToAttrValueAlignment = []
for word in observedWordSequence:
if re.match("[.,?:;!'\"]", word.strip()):
wordToAttrValueAlignment.append(Action.TOKEN_PUNCT)
else:
wordToAttrValueAlignment.append("[]")
directReferenceSequence = []
for r, word in enumerate(observedWordSequence):
directReferenceSequence.append(Action(word, wordToAttrValueAlignment[r], "word"))
if forTrain:
self.vocabulary.add(word)
alingedAttributes = []
if directReferenceSequence:
# Align subphrases of the sentence to attribute values
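            # Rough sketch of the idea (the attribute and value below are illustrative):
            # for an MR value such as "riverside" and a reference containing
            # "by the river side", the bigram "river side" scores a high
            # Levenshtein ratio, so those words get aligned to the 'area' attribute.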
observedValueAlignments = {}
valueToAttr = {}
for attr in MR.attributeValues.keys():
value = MR.attributeValues[attr]
if not value.startswith(Action.TOKEN_X) and not full_delex:
observedValueAlignments[value] = set()
valueToAttr[value] = attr
valuesToCompare = set()
valuesToCompare.update([value, attr])
valuesToCompare.update(value.split(" "))
valuesToCompare.update(attr.split(" "))
valuesToCompare.update(attr.split("_"))
for valueToCompare in valuesToCompare:
# obtain n-grams from the sentence
for n in range(1, 6):
grams = ngrams(directReferenceSequence, n)
# calculate the similarities between each gram and valueToCompare
for gram in grams:
if Action.TOKEN_X not in [o.label for o in gram].__str__() and Action.TOKEN_PUNCT not in [o.attribute for o in gram]:
compare = " ".join(o.label for o in gram)
backwardCompare = " ".join(o.label for o in reversed(gram))
if compare.strip():
# Calculate the character-level distance between the value and the nGram (in its original and reversed order)
distance = Levenshtein.ratio(valueToCompare.lower(), compare.lower())
backwardDistance = Levenshtein.ratio(valueToCompare.lower(), backwardCompare.lower())
                                    # We keep the best score; note that Levenshtein.ratio is a normalized similarity, so greater is better
if backwardDistance > distance:
distance = backwardDistance
if (distance > 0.3):
observedValueAlignments[value].add((gram, distance))
while observedValueAlignments.keys():
# Find the best aligned nGram
max = -1000
bestGrams = {}
toRemove = set()
for value in observedValueAlignments.keys():
if observedValueAlignments[value]:
for gram, distance in observedValueAlignments[value]:
if distance > max:
max = distance
bestGrams = {}
if distance == max:
bestGrams[gram] = value
else:
toRemove.add(value)
for value in toRemove:
del observedValueAlignments[value]
            # Going with the latest occurrence of a matched ngram works best when aligning with hard alignments,
            # because all the other matched ngrams that occur to the left of the latest one will probably be aligned as well.
'''
maxOccurance = -1
bestGram = False
bestValue = False
for gram in bestGrams:
occur = self.find_subList_in_actionList(gram, directReferenceSequence)[0] + len(gram)
if occur > maxOccurance:
maxOccurance = occur
bestGram = gram
bestValue = bestGrams[gram]
'''
# Otherwise might be better to go for the longest ngram
maxLen = 0
bestGram = False
bestValue = False
for gram in sorted(bestGrams):
if len(gram) > maxLen:
maxLen = len(gram)
bestGram = gram
bestValue = bestGrams[gram]
if bestGram:
# Find the subphrase that corresponds to the best aligned nGram
bestGramPos = self.find_subList_in_actionList(bestGram, directReferenceSequence)
if bestGramPos:
# Only apply the gram if the position is not already aligned
unalignedRange = True
for i in range(bestGramPos[0], bestGramPos[1] + 1):
if directReferenceSequence[i].attribute != '[]':
unalignedRange = False
if unalignedRange:
for i in range(bestGramPos[0], bestGramPos[1] + 1):
directReferenceSequence[i].attribute = valueToAttr[bestValue]
alingedAttributes.append(directReferenceSequence[i].attribute)
if forTrain:
# Store the best aligned nGram
if bestValue not in self.valueAlignments.keys():
self.valueAlignments[bestValue] = {}
self.valueAlignments[bestValue][bestGram] = max
# And remove it from the observed ones for this instance
del observedValueAlignments[bestValue]
else:
observedValueAlignments[bestValue].remove((bestGram, max))
else:
observedValueAlignments[bestValue].remove((bestGram, max))
for action in directReferenceSequence:
if action.label.startswith(Action.TOKEN_X):
attr = action.label[3:action.label.rfind('_')]
if attr not in alingedAttributes:
action.attribute = attr
alingedAttributes.append(action.attribute)
if full_delex:
alingedAttributes = []
for action in directReferenceSequence:
if action.label.startswith(Action.TOKEN_X):
attr = action.label[3:action.label.rfind('_')]
action.attribute = attr
alingedAttributes.append(action.attribute)
# If not all attributes are aligned, ignore the instance from training?
# Alternatively, we could align them randomly; certainly not ideal, but usually it concerns edge cases
if not forTrain and full_delex:
for attr in MR.attributeValues:
MR.attributeValues[attr] = Action.TOKEN_X + attr + "_0"
if full_delex and infer_MRs and forTrain and MR.attributeValues.keys() != set(alingedAttributes) and len(alingedAttributes) > 0:
for attr in [o for o in MR.attributeValues.keys()]:
if attr not in alingedAttributes:
del MR.attributeValues[attr]
MR.getAbstractMR(True)
if (MR.attributeValues.keys() == set(alingedAttributes) and (not full_delex or len(MR.attributeValues.keys()) == len(alingedAttributes))) or not forTrain:
if forTrain:
directReferenceSequence = inferNaiveAlignments(MR, directReferenceSequence)
DI = DatasetInstance(MR, directReferenceSequence, self.postProcessRef(MR, directReferenceSequence))
instances[singlePredicate].append(DI)
return instances
def createLists_SFX(self, dataFile, forTrain=False):
print("Create lists from ", dataFile, "...")
instances = dict()
dataPart = []
# We read the data from the data files.
with open(dataFile, encoding="utf8") as f:
lines = f.readlines()
for s in lines:
s = str(s)
if s.startswith("\""):
dataPart.append(s)
num = 0
# Each line corresponds to a MR
with open(dataFile) as f:
dataPart = json.load(f)
for line in dataPart:
num += 1
MRPart = line[0].lower().strip()
refPart = line[1].lower().strip()
if refPart.startswith("\"") and refPart.endswith("\""):
refPart = refPart[1:-1]
if MRPart.startswith("\""):
MRPart = MRPart[1:]
if refPart.startswith("\""):
refPart = refPart[1:]
if refPart.endswith("\""):
refPart = refPart[:-1]
refPart = re.sub("([.,?:;!'-])", " \g<1> ", refPart)
refPart = refPart.replace("\\?", " \\? ").replace("\\.", " \\.").replace(",", " , ").replace(" ", " ").strip()
refPart = refPart.replace(" hotels ", " hotel -s ")
refPart = " ".join(refPart.split())
predicate = MRPart[:MRPart.find('(')]
if predicate not in self.predicates:
self.predicates.append(predicate)
if predicate not in instances:
instances[predicate] = []
MRAttrValues = MRPart[MRPart.find('(') + 1:MRPart.rfind(')')].split(";")
# Map from original values to delexicalized values
delexicalizedMap = {}
# Map attributes to their values
attributeValues = {}
for attrValue in MRAttrValues:
if not attrValue:
attribute = 'none'
value = attribute + '_none'
elif '=' in attrValue:
attrValue = attrValue.split('=')
attribute = attrValue[0].strip().lower()
value = attrValue[1].strip().lower()
if value.startswith("'") and value.endswith("'"):
value = value[1:-1]
else:
attribute = attrValue
value = attribute + '_none'
if forTrain and predicate not in self.attributes:
self.attributes[predicate] = set()
if attribute:
if forTrain:
self.attributes[predicate].add(attribute)
if attribute in attributeValues:
                        if (value == 'yes' and
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The F4PGA Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Classes for representing and creating a physical netlist in python,
along with reading and writing the physical netlist format.
Useful starting points:
PhysicalNetlist - class that can read and write physical netlist format,
and be constructed and inspected from Python.
"""
import enum
from collections import namedtuple
from .route_stitching import RoutingTree, stitch_segments, flatten_segments
# Physical cell type enum.
class PhysicalCellType(enum.Enum):
Locked = 0
Port = 1
Gnd = 2
Vcc = 3
class PhysicalNetType(enum.Enum):
# Net is just a signal, not a VCC or GND tied net.
Signal = 0
# Net is tied to GND.
Gnd = 1
# Net is tied to VCC.
Vcc = 2
# Represents an active pip between two tile wires.
#
# tile (str) - Name of tile
# wire0 (str) - Name of upstream wire to pip
# wire1 (str) - Name of downstream wire from pip
# forward (bool) - For bidirectional pips, is the connection from wire0 to
# wire1 (forward=True) or wire1 to wire0 (forward=False).
Pip = namedtuple('Pip', 'tile wire0 wire1 forward')
# Pin placement directive
#
# Associates a BEL pin with a Cell pin
#
# bel_pin (str) - Name of BEL pin being associated
# cell_pin (str) - Name of Cell pin being associated
# bel (str) - Name of BEL that contains BEL pin. If None is BEL from Placement
# class.
# other_cell_type (str) - Used to define multi cell mappings.
# other_cell_name (str) - Used to define multi cell mappings.
Pin = namedtuple('Pin', 'bel_pin cell_pin bel other_cell_type other_cell_name')
PhysicalNet = namedtuple('PhysicalNet', 'name type sources stubs')
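# Example (sketch): an active, forward pip between two wires of a tile; the
# tile and wire names below are purely illustrative.
#
#   pip = Pip(tile='INT_X0Y0', wire0='WW2BEG0', wire1='WW2END0', forward=True)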
class Placement():
""" Class for defining a Cell placement within a design.
cell_type (str) - Type of cell being placed
cell_name (str) - Name of cell instance being placed.
site (str) - Site Cell is being placed within.
bel (str) - Name of primary BEL being placed.
"""
def __init__(self, cell_type, cell_name, site, bel):
self.cell_type = cell_type
self.cell_name = cell_name
self.site = site
self.bel = bel
self.pins = []
self.other_bels = set()
def add_bel_pin_to_cell_pin(self,
bel_pin,
cell_pin,
bel=None,
other_cell_type=None,
other_cell_name=None):
""" Add a BEL pin -> Cell pin association.
bel_pin (str) - Name of BEL pin being associated.
        cell_pin (str) - Name of Cell pin being associated.
"""
if bel is None:
bel = self.bel
elif bel != self.bel:
self.other_bels.add(bel)
self.pins.append(
Pin(
bel_pin=bel_pin,
cell_pin=cell_pin,
bel=bel,
other_cell_type=other_cell_type,
other_cell_name=other_cell_name,
))
def descend_branch(obj, node, string_id):
""" Descend a branch to continue outputting the interchange to capnp object. """
obj.init('branches', len(node.branches))
for branch_obj, branch in zip(obj.branches, node.branches):
branch.output_interchange(branch_obj, string_id)
class PhysicalBelPin():
""" Python class that represents a BEL pin in a physical net.
site (str) - Site containing BEL pin
bel (str) - BEL containing BEL pin
pin (str) - BEL pin in physical net.
direction (Direction) - Direction of BEL pin.
"""
def __init__(self, site, bel, pin):
self.site = site
self.bel = bel
self.pin = pin
self.branches = []
def output_interchange(self, obj, string_id):
""" Output this route segment and all branches beneth it.
obj (physical_netlist.RouteBranch pycapnp object) -
Object to write PhysicalBelPin into
string_id (str -> int) - Function to intern strings into PhysNetlist
string list.
"""
obj.routeSegment.init('belPin')
obj.routeSegment.belPin.site = string_id(self.site)
obj.routeSegment.belPin.bel = string_id(self.bel)
obj.routeSegment.belPin.pin = string_id(self.pin)
descend_branch(obj, self, string_id)
def get_device_resource(self, site_types, device_resources):
""" Get device resource that corresponds to this class. """
return device_resources.bel_pin(self.site, site_types[self.site],
self.bel, self.pin)
def to_tuple(self):
""" Create tuple suitable for sorting this object.
This tuple is used for sorting against other routing branch objects
to generate a canonical routing tree.
"""
return ('bel_pin', self.site, self.bel, self.pin)
def __str__(self):
return 'PhysicalBelPin({}, {}, {})'.format(
repr(self.site),
repr(self.bel),
repr(self.pin),
)
class PhysicalSitePin():
""" Python class that represents a site pin in a physical net.
site (str) - Site containing site pin
pin (str) - Site pin in physical net.
"""
def __init__(self, site, pin):
self.site = site
self.pin = pin
self.branches = []
def output_interchange(self, obj, string_id):
""" Output this route segment and all branches beneth it.
obj (physical_netlist.RouteBranch pycapnp object) -
Object to write PhysicalBelPin into
string_id (str -> int) - Function to intern strings into PhysNetlist
string list.
"""
obj.routeSegment.init('sitePin')
obj.routeSegment.sitePin.site = string_id(self.site)
obj.routeSegment.sitePin.pin = string_id(self.pin)
descend_branch(obj, self, string_id)
def get_device_resource(self, site_types, device_resources):
""" Get device resource that corresponds to this class. """
return device_resources.site_pin(self.site, site_types[self.site],
self.pin)
def to_tuple(self):
""" Create tuple suitable for sorting this object.
This tuple is used for sorting against other routing branch objects
to generate a canonical routing tree.
"""
return ('site_pin', self.site, self.pin)
def __str__(self):
return 'PhysicalSitePin({}, {})'.format(
repr(self.site),
repr(self.pin),
)
class PhysicalPip():
""" Python class that represents a active pip in a physical net.
tile (str) - Tile containing pip
wire0 (str) - Name of upstream wire to pip
wire1 (str) - Name of downstream wire from pip
forward (bool) - For bidirectional pips, is the connection from wire0 to
wire1 (forward=True) or wire1 to wire0 (forward=False).
site (str) - name of site in case of Pseudo PIP
"""
def __init__(self, tile, wire0, wire1, forward=True, site=None):
self.tile = tile
self.wire0 = wire0
self.wire1 = wire1
self.forward = forward
self.site = site
self.branches = []
def output_interchange(self, obj, string_id):
""" Output this route segment and all branches beneth it.
obj (physical_netlist.RouteBranch pycapnp object) -
Object to write PhysicalBelPin into
string_id (str -> int) - Function to intern strings into PhysNetlist
string list.
"""
obj.routeSegment.init('pip')
obj.routeSegment.pip.tile = string_id(self.tile)
obj.routeSegment.pip.wire0 = string_id(self.wire0)
obj.routeSegment.pip.wire1 = string_id(self.wire1)
obj.routeSegment.pip.forward = self.forward
obj.routeSegment.pip.isFixed = True
descend_branch(obj, self, string_id)
def get_device_resource(self, site_types, device_resources):
""" Get device resource that corresponds to this class. """
return device_resources.pip(self.tile, self.wire0, self.wire1)
def to_tuple(self):
""" Create tuple suitable for sorting this object.
This tuple is used for sorting against other routing branch objects
to generate a canonical routing tree.
"""
return ('pip', self.tile, self.wire0, self.wire1)
def __str__(self):
return 'PhysicalPip({}, {}, {}, {}, {})'.format(
repr(self.tile),
repr(self.wire0),
repr(self.wire1),
repr(self.forward),
repr(self.site),
)
class PhysicalSitePip():
""" Python class that represents a site pip in a physical net.
This models site routing muxes and site inverters.
site (str) - Site containing site pip
bel (str) - Name of BEL that contains site pip
pin (str) - Name of BEL pin that is the active site pip
is_inverting (bool) - Indicates whether the site PIP is inverted
"""
def __init__(self, site, bel, pin, is_inverting=False):
self.site = site
self.bel = bel
self.pin = pin
self.is_inverting = is_inverting
self.branches = []
def output_interchange(self, obj, string_id):
""" Output this route segment and all branches beneth it.
obj (physical_netlist.RouteBranch pycapnp object) -
Object to write PhysicalBelPin into
string_id (str -> int) - Function to intern strings into PhysNetlist
string list.
"""
obj.routeSegment.init('sitePIP')
obj.routeSegment.sitePIP.site = string_id(self.site)
obj.routeSegment.sitePIP.bel = string_id(self.bel)
obj.routeSegment.sitePIP.pin = string_id(self.pin)
obj.routeSegment.sitePIP.isInverting = self.is_inverting
descend_branch(obj, self, string_id)
def get_device_resource(self, site_types, device_resources):
""" Get device resource that corresponds to this class. """
return device_resources.site_pip(self.site, site_types[self.site],
self.bel, self.pin)
def to_tuple(self):
""" Create tuple suitable for sorting this object.
This tuple is used for sorting against other routing branch objects
to generate a canonical routing tree.
"""
return ('site_pip', self.site, self.bel, self.pin, self.is_inverting)
def __str__(self):
return 'PhysicalSitePip({}, {}, {}, {})'.format(
repr(self.site),
repr(self.bel),
repr(self.pin),
repr(self.is_inverting),
)
def convert_tuple_to_object(site, tup):
""" Convert physical netlist tuple to object.
    Physical netlist tuples are a lightweight way to represent the physical
    net tree.
    site (Site) - Site object that the tuple belongs to.
tup (tuple) - Tuple that is either a site pin, bel pin, or site pip.
Returns - PhysicalSitePin, PhysicalBelPin, or PhysicalSitePip based on
tuple.
>>> Site = namedtuple('Site', 'name')
>>> site = Site(name='TEST_SITE')
>>> site_pin = convert_tuple_to_object(site, ('site_pin', 'TEST_PIN'))
>>> assert isinstance(site_pin, PhysicalSitePin)
>>> site_pin.site
'TEST_SITE'
>>> site_pin.pin
'TEST_PIN'
>>> site_pin.branches
[]
>>> bel_pin = convert_tuple_to_object(site, ('bel_pin', 'ABEL', 'APIN'))
>>> assert isinstance(bel_pin, PhysicalBelPin)
>>> bel_pin.site
'TEST_SITE'
>>> bel_pin.bel
'ABEL'
>>> bel_pin.pin
'APIN'
>>> site_pip = convert_tuple_to_object(site, ('site_pip', 'BBEL', 'BPIN'))
>>> assert isinstance(site_pip, PhysicalSitePip)
>>> site_pip.site
'TEST_SITE'
>>> site_pip.bel
'BBEL'
    >>> site_pip.pin
    'BPIN'
"▁edition": 21752,
"▁leader": 21753,
"Secret": 21754,
"eń": 21755,
"lase": 21756,
"struct": 21757,
"tamina": 21758,
"▁Stanisław": 21759,
"▁baro": 21760,
"▁twee": 21761,
"indra": 21762,
"nello": 21763,
"statt": 21764,
"▁Something": 21765,
"▁Tant": 21766,
"▁Workshop": 21767,
"▁million": 21768,
"▁nin": 21769,
"江": 21770,
"[12]": 21771,
"eso": 21772,
"timer": 21773,
"▁Skor": 21774,
"▁Weekend": 21775,
":08": 21776,
"bito": 21777,
"hada": 21778,
"nana": 21779,
"▁Anima": 21780,
"▁Banco": 21781,
"▁ut": 21782,
"gnu": 21783,
"inio": 21784,
"joh": 21785,
"pito": 21786,
"▁BBM": 21787,
"▁dra": 21788,
"▁mir": 21789,
"▁rad": 21790,
"▁report": 21791,
"Hey": 21792,
"oner": 21793,
"rance": 21794,
"sual": 21795,
"▁Berri": 21796,
"▁Jakub": 21797,
"▁Jubil": 21798,
"▁Medan": 21799,
"▁mine": 21800,
"▁minima": 21801,
"▁stand": 21802,
"Speed": 21803,
"VOR": 21804,
"bore": 21805,
"kuk": 21806,
"torio": 21807,
"istus": 21808,
"posi": 21809,
"éré": 21810,
"▁BNG": 21811,
"▁Domain": 21812,
"▁kill": 21813,
"昭": 21814,
"MON": 21815,
"fog": 21816,
"▁Duna": 21817,
"▁Jaipur": 21818,
"▁stati": 21819,
"TOL": 21820,
"player": 21821,
"shit": 21822,
"▁Colour": 21823,
"▁Jalan": 21824,
"▁NFL": 21825,
"▁Strategi": 21826,
"▁Trag": 21827,
"▁Treasure": 21828,
"▁cloud": 21829,
"▁shan": 21830,
"CEL": 21831,
"iju": 21832,
"vate": 21833,
"width": 21834,
"yh": 21835,
"▁Repubblica": 21836,
"▁Shafi": 21837,
"▁atra": 21838,
"▁recu": 21839,
"estero": 21840,
"moda": 21841,
"nding": 21842,
"peti": 21843,
"ukai": 21844,
"zou": 21845,
"▁Azad": 21846,
"▁Ero": 21847,
"▁arc": 21848,
"Tal": 21849,
"hash": 21850,
"lapse": 21851,
"élé": 21852,
"▁Blanca": 21853,
"▁Diyarbakır": 21854,
"▁Fonda": 21855,
"▁Ibu": 21856,
"▁Kasu": 21857,
"▁before": 21858,
"TSI": 21859,
"ři": 21860,
"▁Aslan": 21861,
"▁corda": 21862,
"▁family": 21863,
"CIN": 21864,
"pilot": 21865,
"solve": 21866,
"▁Fail": 21867,
"▁Morgen": 21868,
"▁colo": 21869,
"▁sie": 21870,
"親": 21871,
"German": 21872,
"ONO": 21873,
"edici": 21874,
"http": 21875,
"ober": 21876,
"umu": 21877,
"▁andro": 21878,
":17": 21879,
"OGA": 21880,
"oren": 21881,
"▁Baga": 21882,
"Vel": 21883,
"cert": 21884,
"irat": 21885,
"▁Blend": 21886,
"▁Nasi": 21887,
"▁Sampai": 21888,
"▁crack": 21889,
"▁machine": 21890,
"Thomas": 21891,
"badan": 21892,
"ganj": 21893,
"guen": 21894,
"vite": 21895,
"▁Nothing": 21896,
"▁alde": 21897,
"▁domain": 21898,
"▁factor": 21899,
"ağa": 21900,
"fag": 21901,
"tile": 21902,
"▁Gür": 21903,
"▁TNI": 21904,
"し": 21905,
"!),": 21906,
"Special": 21907,
"▁(25)": 21908,
"▁APA": 21909,
"▁Exam": 21910,
"▁movie": 21911,
"▁record": 21912,
"iyat": 21913,
"neb": 21914,
"xar": 21915,
"▁Aprili": 21916,
"▁Loca": 21917,
"▁Lugo": 21918,
"▁health": 21919,
"▁podcast": 21920,
"Berlin": 21921,
"hail": 21922,
"iling": 21923,
"oci": 21924,
"ogi": 21925,
"palle": 21926,
"▁NGHIỆP": 21927,
"▁Woll": 21928,
"▁deal": 21929,
"▁quadra": 21930,
"6,000": 21931,
":04": 21932,
"LIM": 21933,
"erer": 21934,
"szcze": 21935,
"édi": 21936,
"▁Vrij": 21937,
"▁Wada": 21938,
"▁como": 21939,
"DVD": 21940,
"kup": 21941,
"▁dy": 21942,
"▁mentor": 21943,
"▁rat": 21944,
"EAR": 21945,
"UCI": 21946,
"arreta": 21947,
"comp": 21948,
"eret": 21949,
"ruti": 21950,
"▁Garcinia": 21951,
"▁Municipal": 21952,
"▁distinct": 21953,
"EH": 21954,
"ELL": 21955,
"gung": 21956,
"lister": 21957,
"saba": 21958,
"▁Buena": 21959,
"▁Guest": 21960,
"▁colla": 21961,
"▁gut": 21962,
"▁task": 21963,
"asti": 21964,
"digital": 21965,
"iyang": 21966,
"remont": 21967,
"reta": 21968,
"veda": 21969,
"▁Guara": 21970,
"▁global": 21971,
"光": 21972,
"aban": 21973,
"stant": 21974,
"▁Integra": 21975,
"▁Panta": 21976,
"郡": 21977,
"==": 21978,
"goda": 21979,
"hele": 21980,
"ibu": 21981,
"irse": 21982,
"pê": 21983,
"thé": 21984,
"tkin": 21985,
"▁Infos": 21986,
"▁Japon": 21987,
"▁Perak": 21988,
"▁Vario": 21989,
"AAR": 21990,
"abay": 21991,
"guera": 21992,
"hh": 21993,
"limo": 21994,
"wiec": 21995,
"▁Carte": 21996,
"▁ESC": 21997,
"▁Iskandar": 21998,
"▁Ova": 21999,
"▁Pueblo": 22000,
"▁soundtrack": 22001,
"上": 22002,
"cense": 22003,
"hormon": 22004,
"inte": 22005,
"lesi": 22006,
"Ras": 22007,
"Tie": 22008,
"WL": 22009,
"ility": 22010,
"jum": 22011,
"shta": 22012,
"▁Horizonte": 22013,
"▁gl": 22014,
"▁pau": 22015,
"9,000": 22016,
"fill": 22017,
"godi": 22018,
"lho": 22019,
"lif": 22020,
"če": 22021,
"▁Plug": 22022,
"▁Thank": 22023,
"▁hau": 22024,
"▁trop": 22025,
"張": 22026,
":07": 22027,
"▁MMORPG": 22028,
"▁Pasi": 22029,
"▁Speech": 22030,
"ciu": 22031,
"▁Donc": 22032,
"▁Nueva": 22033,
"▁OT": 22034,
"▁bilir": 22035,
"CET": 22036,
"GER": 22037,
"amor": 22038,
"pair": 22039,
"▁Patro": 22040,
"▁Unter": 22041,
"▁mexi": 22042,
"▁schi": 22043,
"0.01": 22044,
"Toyota": 22045,
"lew": 22046,
"zna": 22047,
"▁MMC": 22048,
"▁Weil": 22049,
"bare": 22050,
"gem": 22051,
"lare": 22052,
"BIN": 22053,
"Oct": 22054,
"logist": 22055,
"pom": 22056,
"▁Greek": 22057,
"▁Virtu": 22058,
"belt": 22059,
"bez": 22060,
"▁Witte": 22061,
"▁rota": 22062,
"▁vulgar": 22063,
"♪": 22064,
"HUB": 22065,
"cón": 22066,
"loha": 22067,
"ž": 22068,
"▁hidro": 22069,
"▁magnific": 22070,
"▁tibi": 22071,
"devi": 22072,
"kovski": 22073,
"▁(24)": 22074,
"▁use": 22075,
"cf": 22076,
"fier": 22077,
"hiye": 22078,
"monta": 22079,
"oppa": 22080,
"sens": 22081,
"ık": 22082,
"▁Comedy": 22083,
"▁LIVE": 22084,
"▁central": 22085,
"angi": 22086,
"dde": 22087,
"yya": 22088,
"▁Arme": 22089,
"▁Medal": 22090,
"nul": 22091,
"urban": 22092,
"▁SAV": 22093,
"▁condensa": 22094,
"▁three": 22095,
":42": 22096,
"LAR": 22097,
"planet": 22098,
"sade": 22099,
"▁Kodi": 22100,
"▁Teheran": 22101,
"▁ud": 22102,
"vri": 22103,
"▁Barat": 22104,
"▁Bosh": 22105,
"▁Cricket": 22106,
"▁Item": 22107,
"▁Palazzo": 22108,
"▁Süd": 22109,
"ス": 22110,
":19": 22111,
"arda": 22112,
"polo": 22113,
"▁Leit": 22114,
"▁Muslim": 22115,
"▁Plzeň": 22116,
"▁drap": 22117,
"▁eva": 22118,
"▁friend": 22119,
"馬": 22120,
"Amour": 22121,
"CHE": 22122,
"shad": 22123,
"▁dal": 22124,
"JO": 22125,
"YAN": 22126,
"jp": 22127,
"tud": 22128,
"▁CRIS": 22129,
"▁Soviet": 22130,
"▁area": 22131,
"▁far": 22132,
"Ak": 22133,
"PIN": 22134,
"iat": 22135,
"kung": 22136,
"puccino": 22137,
"帝": 22138,
":32": 22139,
"impl": 22140,
"jak": 22141,
"nado": 22142,
"ovna": 22143,
"pice": 22144,
"▁Orta": 22145,
"▁Pré": 22146,
"▁launch": 22147,
"▁tree": 22148,
"istri": 22149,
"pour": 22150,
"shon": 22151,
"▁Brno": 22152,
"▁Justicia": 22153,
"aring": 22154,
"itel": 22155,
"lanti": 22156,
"ngkong": 22157,
"riti": 22158,
"ssé": 22159,
"uj": 22160,
"▁Nika": 22161,
"▁serial": 22162,
":38": 22163,
"jai": 22164,
"▁Chili": 22165,
"▁Llan": 22166,
"▁Puc": 22167,
"▁Rua": 22168,
"▁history": 22169,
"神": 22170,
"fte": 22171,
"ération": 22172,
"▁Niall": 22173,
"▁Zoran": 22174,
"▁lor": 22175,
":14": 22176,
"hallen": 22177,
"lui": 22178,
"ntu": 22179,
"ramos": 22180,
"▁Alegre": 22181,
"▁Fug": 22182,
"▁Kamar": 22183,
"▁interrupt": 22184,
":16": 22185,
"moli": 22186,
"tje": 22187,
"upi": 22188,
"upload": 22189,
"▁funk": 22190,
"super": 22191,
"vala": 22192,
"▁Deg": 22193,
"▁Otel": 22194,
"▁Prakash": 22195,
"With": 22196,
"bern": 22197,
"gey": 22198,
"paro": 22199,
"rf": 22200,
"ubo": 22201,
"vaz": 22202,
"▁1+1": 22203,
"▁Cosa": 22204,
"▁ring": 22205,
":18": 22206,
":33": 22207,
"Biz": 22208,
"ż": 22209,
"▁blo": 22210,
"▁bri": 22211,
"MENA": 22212,
"arma": 22213,
"pori": 22214,
"worm": 22215,
"▁Karta": 22216,
"▁pili": 22217,
"▁shin": 22218,
"河": 22219,
"Natur": 22220,
"TOP": 22221,
"bug": 22222,
"deg": 22223,
"ría": 22224,
"▁Asturias": 22225,
"▁dl": 22226,
"▁seal": 22227,
"▁some": 22228,
":13": 22229,
"Jes": 22230,
"LAP": 22231,
"Ok": 22232,
"That": 22233,
"ansa": 22234,
"entes": 22235,
"née": 22236,
"ollo": 22237,
"rawat": 22238,
"rén": 22239,
"▁colli": 22240,
"▁ending": 22241,
"TIM": 22242,
"cipit": 22243,
"irano": 22244,
"robe": 22245,
"ği": 22246,
"▁Laboratori": 22247,
"▁Upload": 22248,
"▁andre": 22249,
"▁arena": 22250,
"▁tener": 22251,
"1/": 22252,
"PTA": 22253,
"zama": 22254,
"▁Mér": 22255,
"▁Renaissance": 22256,
"▁Sath": 22257,
"▁bou": 22258,
"▁okay": 22259,
"angka": 22260,
"kler": 22261,
"lc": 22262,
"ÍN": 22263,
"ül": 22264,
"▁Pussy": 22265,
"▁RFE": 22266,
"▁images": 22267,
"▁luz": 22268,
"▁zone": 22269,
"From": 22270,
"▁+7": 22271,
"▁3:1": 22272,
"▁Balti": 22273,
"▁Buhari": 22274,
"atori": 22275,
"platz": 22276,
"rda": 22277,
"unge": 22278,
"vasta": 22279,
"wordpress": 22280,
"▁Schau": 22281,
"▁nor": 22282,
"500,000": 22283,
"bamba": 22284,
"camera": 22285,
"ești": 22286,
"stans": 22287,
"station": 22288,
"▁BAR": 22289,
"▁peta": 22290,
"▁porter": 22291,
"▁selle": 22292,
"UTC": 22293,
"beni": 22294,
"doma": 22295,
"zab": 22296,
"▁Lakshmi": 22297,
"▁Ljub": 22298,
"▁Wasser": 22299,
"▁volt": 22300,
"CSS": 22301,
"duction": 22302,
"niz": 22303,
"nr": 22304,
"plasma": 22305,
"ribu": 22306,
"source": 22307,
"tli": 22308,
"uwen": 22309,
"vela": 22310,
"▁Carrefour": 22311,
"▁Libra": 22312,
"▁Shko": 22313,
"▁salsa": 22314,
"BLUE": 22315,
"POL": 22316,
"meg": 22317,
"▁Arabi": 22318,
"▁Josip": 22319,
"▁Pien": 22320,
"▁Potom": 22321,
"▁very": 22322,
":36": 22323,
"ORD": 22324,
"adura": 22325,
"driv": 22326,
"esen": 22327,
"look": 22328,
"yap": 22329,
"▁Rö": 22330,
"▁confession": 22331,
"▁signat": 22332,
"▁sua": 22333,
"lık": 22334,
"▁Oma": 22335,
"▁Porta": 22336,
"▁Zürich": 22337,
"▁fitness": 22338,
"▁gin": 22339,
"▁hil": 22340,
"axe": 22341,
"bura": 22342,
"ibles": 22343,
"▁Krat": 22344,
"▁Person": 22345,
"▁ging": 22346,
"▁integr": 22347,
"▁kas": 22348,
"hte": 22349,
"ptica": 22350,
"▁Vader": 22351,
"新": 22352,
"8,000": 22353,
"CZ": 22354,
"Journal": 22355,
"epe": 22356,
"kad": 22357,
"xeta": 22358,
"▁GTA": 22359,
"▁Sandal": 22360,
"ZI": 22361,
"alach": 22362,
"fond": 22363,
"gola": 22364,
"shish": 22365,
"úl": 22366,
"▁December": 22367,
"▁Groß": 22368,
"▁Lubi": 22369,
"▁Uppsala": 22370,
"▁gem": 22371,
"▁ry": 22372,
"ADH": 22373,
"dici": 22374,
"layan": 22375,
"xir": 22376,
"▁Cea": 22377,
"▁WordPress": 22378,
"▁seo": 22379,
":37": 22380,
"TEA": 22381,
"buru": 22382,
"ddar": 22383,
"italia": 22384,
"▁Minu": 22385,
"▁kur": 22386,
":44": 22387,
"atay": 22388,
"lula": 22389,
"solution": 22390,
"▁HEL": 22391,
# bindings/python/cntk/utils/progress_print.py
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os
import time
from cntk.cntk_py import TensorBoardFileWriter
# TODO: Let's switch to import logging in the future instead of print. [ebarsoum]
class ProgressPrinter(object):
'''
Allows tracking various training time statistics (e.g. loss and metric)
and output them as training progresses.
It provides the number of samples, average loss and average metric
since the last output or since the start of accumulation.
Args:
        freq (int or None, default None): determines how often
         printing will occur. The value of 0 means a geometric
         schedule (1,2,4,...). A value > 0 means an arithmetic schedule
         (a log print at minibatch numbers ``freq``, 2*``freq``, 3*``freq``, ...), and a value of None means no per-minibatch log.
        first (int, default 0): Only start logging after the minibatch number is greater than or equal to ``first``.
tag (string, default EmptyString): prepend minibatch log lines with your own string
        log_to_file (string or None, default None): if None, output log data to stdout. If a string is passed, the string is the path of a file to which the log data is written.
        gen_heartbeat (bool, default False): If True, output a progress message every 10 seconds or so to stdout.
num_epochs (int, default 300): The total number of epochs to be trained. Used for some metadata. This parameter is optional.
tensorboard_log_dir (string or None, default None): if a string is passed, logs statistics to the TensorBoard events file in the given directory.
model (:class:`~cntk.ops.Function` or None, default None): if a Function is passed and ``tensorboard_log_dir`` is not None, records model graph to a TensorBoard events file.
'''
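    # Example (sketch): a typical loop might update the printer once per
    # minibatch and summarize once per epoch; the names below are illustrative.
    #
    #   progress = ProgressPrinter(freq=100, tag='Training', num_epochs=10)
    #   for epoch in range(10):
    #       for mb_loss, mb_size, mb_metric in minibatches:   # hypothetical source of statistics
    #           progress.update(mb_loss, mb_size, mb_metric)
    #       progress.epoch_summary(with_metric=True)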
def __init__(self, freq=None, first=0, tag='', log_to_file=None, rank=None, gen_heartbeat=False, num_epochs=300,
tensorboard_log_dir=None, model=None):
'''
Constructor. The optional ``freq`` parameter determines how often
        printing will occur. The value of 0 means a geometric
        schedule (1,2,4,...). A value > 0 means an arithmetic schedule
        (freq, 2*freq, 3*freq,...), and a value of None means no per-minibatch log.
        Set log_to_file if you want the output to go to a file instead of stdout.
        Set rank to distributed.rank if you are using distributed parallelism -- each rank's log will go to a separate file.
'''
from sys import maxsize
if freq is None:
freq = maxsize
self.loss_since_start = 0
self.metric_since_start = 0
self.samples_since_start = 0
self.updates_since_start = 0
self.loss_since_last = 0
self.metric_since_last = 0
self.samples_since_last = 0
self.total_updates = 0
self.epochs = 0
self.freq = freq
self.first = first
self.tag = '' if not tag else "[{}] ".format(tag)
self.epoch_start_time = 0
self.progress_timer_time = 0
self.log_to_file = log_to_file
self.rank = rank
self.gen_heartbeat = gen_heartbeat
self.num_epochs = num_epochs
# Create TensorBoardFileWriter if the path to a log directory was provided.
self.tensorboard_writer = None
if tensorboard_log_dir is not None:
tb_run_name = tag.lower() if tag else ''
if self.rank is not None:
tb_run_name += 'rank' + str(self.rank)
if tb_run_name:
tensorboard_log_dir = os.path.join(tensorboard_log_dir, tb_run_name)
self.tensorboard_writer = TensorBoardFileWriter(tensorboard_log_dir, model)
self.logfilename = None
if self.log_to_file is not None:
self.logfilename = self.log_to_file
if self.rank != None:
self.logfilename = self.logfilename + 'rank' + str(self.rank)
# print to stdout
print("Redirecting log to file " + self.logfilename)
with open(self.logfilename, "w") as logfile:
logfile.write(self.logfilename + "\n")
self.___logprint('CNTKCommandTrainInfo: train : ' + str(num_epochs))
self.___logprint('CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : ' + str(num_epochs))
self.___logprint('CNTKCommandTrainBegin: train')
if freq==0:
self.___logprint(' average since average since examples')
self.___logprint(' loss last metric last ')
self.___logprint(' ------------------------------------------------------')
def end_progress_print(self, msg=""):
self.___logprint('CNTKCommandTrainEnd: train')
if msg != "" and self.log_to_file is not None:
self.___logprint(msg)
if self.tensorboard_writer is not None:
self.tensorboard_writer.close()
def flush(self):
if self.tensorboard_writer is not None:
self.tensorboard_writer.flush()
def avg_loss_since_start(self):
'''
Returns: the average loss since the start of accumulation
'''
return self.loss_since_start/self.samples_since_start
def avg_metric_since_start(self):
'''
Returns: the average metric since the start of accumulation
'''
return self.metric_since_start/self.samples_since_start
def avg_loss_since_last(self):
'''
Returns: the average loss since the last print
'''
return self.loss_since_last/self.samples_since_last
def avg_metric_since_last(self):
'''
Returns: the average metric since the last print
'''
return self.metric_since_last/self.samples_since_last
def reset_start(self):
'''
Resets the 'start' accumulators
Returns: tuple of (average loss since start, average metric since start, samples since start)
'''
ret = self.avg_loss_since_start(), self.avg_metric_since_start(), self.samples_since_start
self.loss_since_start = 0
self.metric_since_start = 0
self.samples_since_start = 0
self.updates_since_start = 0
return ret
def reset_last(self):
'''
Resets the 'last' accumulators
Returns: tuple of (average loss since last, average metric since last, samples since last)
'''
ret = self.avg_loss_since_last(), self.avg_metric_since_last(), self.samples_since_last
self.loss_since_last = 0
self.metric_since_last = 0
self.samples_since_last = 0
return ret
def ___logprint(self, logline):
if self.log_to_file is None:
# to stdout. if distributed, all ranks merge output into stdout
print(logline)
else:
# to named file. if distributed, one file per rank
with open(self.logfilename, "a") as logfile:
logfile.write(logline + "\n")
def epoch_summary(self, with_metric=False):
'''
If on an arithmetic schedule, prints an epoch summary using the 'start' accumulators.
If on a geometric schedule, does nothing.
Args:
with_metric (`bool`): if `False` it only prints the loss, otherwise it prints both the loss and the metric
'''
self.epochs += 1
if self.freq > 0:
epoch_end_time = time.time()
time_delta = epoch_end_time - self.epoch_start_time
speed = 0
avg_loss, avg_metric, samples = (0, 0, 0)
if self.samples_since_start != 0:
avg_loss, avg_metric, samples = self.reset_start()
if (time_delta > 0):
speed = samples / time_delta
self.epoch_start_time = epoch_end_time
if with_metric:
self.___logprint("Finished Epoch[{} of {}]: {}loss = {:0.6f} * {}, metric = {:0.1f}% * {} {:0.3f}s ({:5.1f} samples per second);".format(self.epochs, self.num_epochs, self.tag, avg_loss, samples, avg_metric*100.0, samples, time_delta, speed))
else:
self.___logprint("Finished Epoch[{} of {}]: {}loss = {:0.6f} * {} {:0.3f}s ({:5.1f} samples per second);".format(self.epochs, self.num_epochs, self.tag, avg_loss, samples, time_delta, speed))
# For logging to TensorBoard, we use self.total_updates as it does not reset after each epoch.
self.update_value('epoch_avg_loss', avg_loss, self.epochs)
if with_metric:
self.update_value('epoch_avg_metric', avg_metric * 100.0, self.epochs)
return avg_loss, avg_metric, samples # BUGBUG: for freq=0, we don't return anything here
def ___generate_progress_heartbeat(self):
timer_delta = time.time() - self.progress_timer_time
# print progress no sooner than 10s apart
if timer_delta > 10 and self.gen_heartbeat:
# print to stdout
print("PROGRESS: 0.00%")
self.progress_timer_time = time.time()
def log(self, message):
self.___logprint(message)
def update(self, loss, minibatch_size, metric=None):
'''
Updates the accumulators using the loss, the minibatch_size and the optional metric.
Args:
loss (`float`): the value with which to update the loss accumulators
minibatch_size (`int`): the value with which to update the samples accumulator
metric (`float` or `None`): if `None` do not update the metric
accumulators, otherwise update with the given value
'''
self.samples_since_start += minibatch_size
self.samples_since_last += minibatch_size
self.loss_since_start += loss * minibatch_size
self.loss_since_last += loss * minibatch_size
self.updates_since_start += 1
self.total_updates += 1
if metric is not None:
self.metric_since_start += metric * minibatch_size
self.metric_since_last += metric * minibatch_size
if self.epoch_start_time == 0:
self.epoch_start_time = time.time()
self.___generate_progress_heartbeat()
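# Geometric schedule: the bitwise test below is true when updates_since_start has the
# form 2**k - 1 (1, 3, 7, 15, ...), so the printing interval roughly doubles each time,
# matching the 1, 2, 4, ... schedule described in the constructor docstring.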
if self.freq == 0 and (self.updates_since_start+1) & self.updates_since_start == 0:
avg_loss, avg_metric, samples = self.reset_last()
if metric is not None:
self.___logprint(' {:8.3g} {:8.3g} {:8.3g} {:8.3g} {:10d}'.format(
self.avg_loss_since_start(), avg_loss,
self.avg_metric_since_start(), avg_metric,
self.samples_since_start))
else:
self.___logprint(' {:8.3g} {:8.3g} {:8s} {:8s} {:10d}'.format(
self.avg_loss_since_start(), avg_loss,
'', '', self.samples_since_start))
elif self.freq > 0 and (self.updates_since_start % self.freq == 0 or self.updates_since_start <= self.first):
avg_loss, avg_metric, samples = self.reset_last()
if self.updates_since_start <= self.first: # printing individual MBs
first_mb = self.updates_since_start
else:
first_mb = max(self.updates_since_start - self.freq + 1, self.first+1)
if metric is not None:
self.___logprint(' Minibatch[{:4d}-{:4d}]: loss = {:0.6f} * {:d}, metric = {:0.1f}% * {:d};'.format(
first_mb, self.updates_since_start, avg_loss, samples, avg_metric*100.0, samples))
else:
self.___logprint(' Minibatch[{:4d}-{:4d}]: loss = {:0.6f} * {:d};'.format(
first_mb, self.updates_since_start, avg_loss, samples))
if self.updates_since_start > self.first:
# For logging to TensorBoard, we use self.total_updates as it does not reset after each epoch.
self.update_value('mb_avg_loss', avg_loss, self.total_updates)
if metric is not None:
self.update_value('mb_avg_metric', avg_metric * 100.0, self.total_updates)
def update_with_trainer(self, trainer, with_metric=False):
'''
Updates the accumulators using the loss, the minibatch_size and optionally the metric
using the information from the ``trainer``.
Args:
trainer (:class:`cntk.trainer.Trainer`): trainer from which information is gathered
with_metric (`bool`): whether to update the metric accumulators
'''
if trainer.previous_minibatch_sample_count == 0:
return
| |
<filename>docker/deps/torchlayers/torchlayers/convolution.py
import collections
import itertools
import math
import typing
import torch
from ._dev_utils import modules
from .pooling import GlobalAvgPool
class Conv(modules.InferDimension):
"""Standard convolution layer.
Based on input shape it either creates 1D, 2D or 3D convolution for inputs of shape
3D, 4D, 5D respectively (including batch as first dimension).
An additional `same` `padding` mode was added and set as the default. With it, input dimensions
(except for channels), such as height and width, will be preserved (for odd kernel sizes).
`kernel_size` got a default value of `3`.
Otherwise acts exactly like PyTorch's Convolution, see
`documentation <https://pytorch.org/docs/stable/nn.html#convolution-layers>`__.
Parameters
----------
in_channels: int
Number of channels in the input image
out_channels: int
Number of channels produced by the convolution
kernel_size: int or tuple, optional
Size of the convolving kernel. Default: 3
stride: int or tuple, optional
Stride of the convolution. Default: 1
padding: int, tuple or str, optional
Zero-padding added to both sides of the input, or `same` to preserve spatial size. Default: `same`
padding_mode: string, optional
Accepted values `zeros` and `circular` Default: `zeros`
dilation: int or tuple, optional
Spacing between kernel elements. Default: 1
groups: int, optional
Number of blocked connections from input channels to output channels. Default: 1
bias: bool, optional
If ``True``, adds a learnable bias to the output. Default: ``True``
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding="same",
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
):
super().__init__(
instance_creator=Conv._pad,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode,
)
@classmethod
def _dimension_pad(cls, dimension, dilation, kernel_size, stride):
if kernel_size % 2 == 0:
raise ValueError(
'Only odd kernel size for padding "same" is currently supported.'
)
return max(
math.ceil(
(dimension * stride - dimension + dilation * (kernel_size - 1)) / 2  # float division so math.ceil has an effect
),
0,
)
@classmethod
def _expand_if_needed(cls, dimensions, argument):
if isinstance(argument, collections.abc.Iterable):
return argument
return tuple(itertools.repeat(argument, len(dimensions)))
@classmethod
def _pad(cls, inputs, inner_class, **kwargs):
if isinstance(kwargs["padding"], str) and kwargs["padding"].lower() == "same":
dimensions = inputs.shape[2:]
paddings = tuple(
cls._dimension_pad(dimension, dilation, kernel_size, stride)
for dimension, dilation, kernel_size, stride in zip(
dimensions,
*[
cls._expand_if_needed(dimensions, kwargs[name])
for name in ("dilation", "kernel_size", "stride")
],
)
)
kwargs["padding"] = paddings
return inner_class(**kwargs)
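# Usage sketch (hypothetical shapes, assuming the torchlayers shape-inference machinery
# builds the underlying torch.nn.ConvNd on the first forward pass):
#
#   layer = Conv(in_channels=3, out_channels=16)      # kernel_size=3, padding="same"
#   out = layer(torch.randn(1, 3, 32, 32))            # spatial size preserved: (1, 16, 32, 32)
#
# With stride=1, dilation=1 and kernel_size=3, _dimension_pad evaluates to 1 for each
# spatial dimension, which is exactly the padding that keeps height and width unchanged.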
class ConvTranspose(modules.InferDimension):
"""Standard transposed convolution layer.
Based on input shape it either creates a 1D, 2D or 3D transposed convolution (for inputs of shape
3D, 4D, 5D including batch as first dimension).
Otherwise acts exactly like PyTorch's Convolution, see
`documentation <https://pytorch.org/docs/stable/nn.html#convolution-layers>`__.
Parameters
----------
in_channels: int
Number of channels in the input image
out_channels: int
Number of channels produced by the convolution
kernel_size: int or tuple
Size of the convolving kernel
stride: int or tuple, optional
Stride of the convolution. Default: 1
padding: int or tuple, optional
``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of the input. Default: 0
output_padding: int or tuple, optional
Additional size added to one side of the output shape. Default: 0
groups: int, optional
Number of blocked connections from input channels to output channels. Default: 1
bias: bool, optional
If ``True``, adds a learnable bias to the output. Default: ``True``
dilation: int or tuple, optional
Spacing between kernel elements. Default: 1
padding_mode: string, optional
Accepted values `zeros` and `circular` Default: `zeros`
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode="zeros",
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
bias=bias,
dilation=dilation,
padding_mode=padding_mode,
)
class ChannelShuffle(modules.Representation):
"""Shuffle output channels from modules.
When using grouped convolution, knowledge transfer between subsequent layers is reduced
(as the same input channels are always convolved with the same output channels).
This layer reshuffles output channels via a simple `reshape` in order to mix the representations
from separate groups and improve knowledge transfer.
Originally proposed by <NAME> et al. in:
`ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices <https://arxiv.org/abs/1707.01083>`__
Parameters
----------
groups: int
Count of groups used in the previous convolutional layer.
"""
def __init__(self, groups: int):
super().__init__()
self.groups: int = groups
def forward(self, inputs):
return (
inputs.reshape(inputs.shape[0], self.groups, -1, *inputs.shape[2:])
.transpose(1, 2)
.reshape(*inputs.shape)
)
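# Usage sketch: with groups=2 and 4 channels labelled [a0, a1, b0, b1] (two groups of two),
# the reshape/transpose/reshape above reorders them to [a0, b0, a1, b1], so the next grouped
# convolution sees channels from both groups.
#
#   shuffle = ChannelShuffle(groups=2)
#   mixed = shuffle(torch.randn(8, 4, 16, 16))   # same shape, channels interleaved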
class ChannelSplit(modules.Representation):
"""Convenience layer splitting tensor using ratio.
Returns two outputs, splitted accordingly to parameters.
Parameters
----------
ratio: float
Percentage of channels to be split
dim: int
Dimension along which input will be splitted. Default: `1` (channel dimension)
"""
def __init__(self, ratio: float, dim: int = 1):
super().__init__()
if not 0.0 < ratio < 1.0:
raise ValueError(
"Ratio of small expand fire module has to be between 0 and 1."
)
self.ratio: float = ratio
self.dim: int = dim
def forward(self, inputs):
# use self.dim (not a hard-coded channel axis) so non-default dims split correctly
return torch.split(inputs, int(inputs.shape[self.dim] * self.ratio), dim=self.dim)
class Residual(torch.nn.Module):
"""Residual connection adding input to output of provided module.
Originally proposed by He et al. in `ResNet <https://arxiv.org/abs/1512.03385>`__
For correct usage it is advised to keep the input line (skip connection) without
any layer or activation and implement transformations only in the module argument
(as per https://arxiv.org/pdf/1603.05027.pdf).
The above can easily be achieved by using one of the BatchNormConv-style modules.
Parameters
----------
module: torch.nn.Module
Convolutional PyTorch module (or other compatible module).
Shape of module's `inputs` has to be equal to its `outputs`; both
should be addable `torch.Tensor` instances.
"""
def __init__(self, module: torch.nn.Module):
super().__init__()
self.module = module
def forward(self, inputs):
return self.module(inputs) + inputs
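# Usage sketch (hypothetical module): wrap any shape-preserving block so that its output
# is added to its input, e.g.
#
#   block = torch.nn.Sequential(
#       torch.nn.Conv2d(16, 16, kernel_size=3, padding=1), torch.nn.ReLU()
#   )
#   res = Residual(block)
#   out = res(torch.randn(1, 16, 8, 8))   # same shape as the input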
class Dense(torch.nn.Module):
"""Dense residual connection concatenating input channels and output channels of provided module.
Originally proposed by <NAME> et al. in `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`__
Parameters
----------
module: torch.nn.Module
Convolutional PyTorch module (or other compatible module).
Shape of module's `inputs` has to be equal to its `outputs`; both
should be concatenable `torch.Tensor` instances.
dim: int, optional
Dimension along which `input` and module's `output` will be concatenated.
Default: `1` (channel-wise)
"""
def __init__(self, module: torch.nn.Module, dim: int = 1):
super().__init__()
self.module: torch.nn.Module = module
self.dim: int = dim
def forward(self, inputs):
# torch.cat expects a sequence of tensors as its first argument
return torch.cat([self.module(inputs), inputs], dim=self.dim)
class Poly(torch.nn.Module):
"""Apply one module to input multiple times and sum.
Its equation for `order` equal to :math:`N` can be written as:
.. math::
1 + F + F^2 + ... + F^N
where :math:`1` is identity and :math:`F` is mapping specified by `module`.
Originally proposed by <NAME> et al. in
`PolyNet: A Pursuit of Structural Diversity in Very Deep Networks <https://arxiv.org/abs/1611.05725>`__
Parameters
----------
module: torch.nn.Module
Convolutional PyTorch module (or other compatible module).
`inputs` shape has to be equal to its `output` shape
(for 2D convolution it would be :math:`(C, H, W)` (channels, height, width respectively)).
order: int, optional
Order of PolyInception module. For order equal to `1` acts just like
ResNet, order of `2` was used in original paper. Default: `2`
"""
def __init__(self, module: torch.nn.Module, order: int = 2):
super().__init__()
if order < 1:
raise ValueError("Order of Poly cannot be less than 1.")
self.module: torch.nn.Module = module
self.order: int = order
def extra_repr(self):
return f"order={self.order},"
def forward(self, inputs):
outputs = [self.module(inputs)]
for _ in range(1, self.order):
outputs.append(self.module(outputs[-1]))
return torch.stack([inputs] + outputs, dim=0).sum(dim=0)
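# Usage sketch: with order=2 the forward pass computes
#   inputs + module(inputs) + module(module(inputs))
# i.e. the 1 + F + F^2 expansion from the docstring, obtained by stacking the
# intermediate tensors and summing over the new leading dimension.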
class MPoly(torch.nn.Module):
"""Apply multiple (m) modules to input multiple times and sum.
Its equation for `modules` length equal to :math:`N` would be:
.. math::
1 + F_0 + F_0 * F_1 + ... + F_0 * F_1 * ... * F_N
where :math:`1` is identity and consecutive :math:`F_N` are consecutive models
specified by user.
Originally proposed by <NAME> et al. in
`PolyNet: A Pursuit of Structural Diversity in Very Deep Networks <https://arxiv.org/abs/1611.05725>`__
Parameters
----------
*modules: torch.nn.Module
Variable-length argument with modules to use with MPoly. If empty, acts as an identity.
For one module, acts like `ResNet`; two modules were used in the original paper.
All modules need `inputs` and `outputs` shapes equal, and equal between themselves.
"""
def __init__(self, *modules: torch.nn.Module):
super().__init__()
self.modules_: torch.nn.Module = torch.nn.ModuleList(modules)
def forward(self, inputs):
outputs = [self.modules_[0](inputs)]
for module in self.modules_[1:]:
# apply each subsequent module to the previous output (self.module does not exist here)
outputs.append(module(outputs[-1]))
return torch.stack([inputs] + outputs, dim=0).sum(dim=0)
class WayPoly(torch.nn.Module):
"""Apply multiple modules to input and sum.
Its equation for `modules` length equal to :math:`N` would be:
.. math::
1 + F_1 + F_2 + ... + F_N
where :math:`1` is identity and consecutive :math:`F_N` are consecutive models
specified by user.
Can be considered as an extension of standard | |
x = self.cache['cloudtrail']['get_trail_status'].get(region,{})[ct['TrailARN']]['LatestCloudWatchLogsDeliveryTime']
if (time.time() - x) < 86400:
evidence = {region : self.cache['cloudtrail']['get_trail_status'].get(region,{})[ct['TrailARN']]['LatestCloudWatchLogsDeliveryTime']}
compliance = 1
self.finding(policy,compliance,evidence)
# --------------------------------------------------------
policy = {
'name' : 'Ensure AWS Config is enabled in all regions',
'description' : 'The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/config/latest/developerguide/gs-console.html">AWS Best Practices</a> to enable AWS Config in all regions.',
'vulnerability' : 'Without AWS Config enabled, technical teams will struggle to identify the historical changes to resources when the need arises for forensic investigation.',
'severity' : 'low',
'reference' : [
'AWS CIS v.1.4.0 - 3.5',
'AWS CIS v.1.2.0 - 2.5'
],
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=72',
'https://docs.aws.amazon.com/config/latest/developerguide/gs-console.html'
]
}
for region in regionList:
compliance = 0
evidence = {'region' : region}
for c in self.cache['config']['describe_configuration_recorders'].get(region,{}).get('ConfigurationRecorders',{}):
if c.get('recordingGroup').get('allSupported') == True and c.get('recordingGroup').get('includeGlobalResourceTypes') == True:
# == so far so good. Let's see if we can find the recording status
for s in self.cache['config']['describe_configuration_recorder_status'].get(region,{})['ConfigurationRecordersStatus']:
if s['name'] == c['name']:
if s['recording'] == True and s['lastStatus'] == 'SUCCESS':
compliance = 1
self.finding(policy,compliance,evidence)
# -------------------------------------------------------
policy = {
'name' : 'Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket',
'description' : 'S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html">AWS Best Practices</a> to enable S3 access logging.',
'vulnerability' : 'By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within the target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.',
'severity' : 'low',
'reference' : [
'AWS CIS v.1.4.0 - 3.6',
'AWS CIS v.1.2.0 - 2.6'
],
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=75',
'https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html'
]
}
for region in self.cache['cloudtrail']['describe_trails']:
compliance = 0
for ct in self.cache['cloudtrail']['describe_trails'].get(region,{}).get('trailList',{}):
if 'S3BucketName' in ct:
logging = self.cache['s3']['get_bucket_logging'].get('us-east-1',{}).get(ct['S3BucketName'],{}).get('LoggingEnabled',{}).get('TargetBucket',None)
if logging != None:
compliance = 1
self.finding(policy,compliance,region)
# -------------------------------------------------------
policy = {
'name' : 'Ensure CloudTrail logs are encrypted at rest using KMS CMKs',
'description' : 'AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html">AWS Best Practices</a> to enable S3 encryption.',
'vulnerability' : 'Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.',
'severity' : 'low',
'reference' : [
'AWS CIS v.1.4.0 - 3.7',
'AWS CIS v.1.2.0 - 2.7'
],
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=78',
'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html'
]
}
for region in self.cache['cloudtrail']['describe_trails']:
compliance = 0
for ct in self.cache['cloudtrail']['describe_trails'].get(region,{}).get('trailList',{}):
if 'KmsKeyId' in ct:
compliance = 1
self.finding(policy,compliance,region)
# -------------------------------------------------------
policy = {
'name' : 'Ensure rotation for customer created CMKs is enabled',
'references' : [
'AWS CIS v.1.4.0 - 3.8',
'AWS CIS v.1.2.0 - 2.8'
],
'description' : 'Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed.',
'vulnerability' : 'By not rotating encryption keys, there is a higher likelihood of data compromise due to improper management of secret keys.',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html">AWS Best Practices</a> to rotate keys.',
'severity' : 'medium',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=82',
'https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html'
]
}
for region in regionList:
if region in self.cache['kms']['get_key_rotation_status']:
for k in self.cache['kms']['get_key_rotation_status'].get(region,{}):
evidence = { region : k}
if self.cache['kms']['get_key_rotation_status'].get(region,{})[k] == True:
self.finding(policy,1,evidence)
else:
self.finding(policy,0,evidence)
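# Note: self.finding(policy, compliance, evidence) is called once per key, with
# compliance 1 when rotation is enabled and 0 otherwise; evidence is a
# {region: key_id} dict, mirroring the pattern used by the other checks above.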
# -------------------------------------------------------
policy = {
'name' : 'Ensure VPC flow logging is enabled in all VPCs',
'description' : 'VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.',
'vulnerability' : 'Without VPC Flow Logs, technical teams will not have visibility on how network traffic flows.',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html">AWS Best Practices</a> to enable VPC Flow Logs.',
'severity' : 'low',
'reference' : [
'AWS CIS v.1.4.0 - 3.9',
'AWS CIS v.1.2.0 - 2.9'
],
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=84',
'https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html'
]
}
for region in regionList:
for VV in self.cache['ec2']['describe_vpcs'].get(region,{}):
for v in VV['Vpcs']:
compliance = 0
evidence = { region : v['VpcId'] }
for F in self.cache['ec2']['describe_flow_logs'].get(region,{}):
for fl in F['FlowLogs']:
if fl['ResourceId'] == v['VpcId']:
compliance = 1
self.finding(policy,compliance,evidence)
# --------------------------------------
# == CIS 3.x is special -- all the metrics are identical, except for the filter pattern. So we break our "one policy" rule, and combine them all into a list
POLICIES = [
{
'name' : 'Ensure a log metric filter and alarm exist for unauthorized API calls',
'description' : 'Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.',
'vulnerability' : 'Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.',
'remediation' : 'Follow the steps in the CIS Benchmark paper',
'severity' : 'info',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=88'
],
'references' : [
'AWS CIS v.1.4.0 - 4.1',
'AWS CIS v.1.2.0 - 3.1'
],
'filterPattern' : '{ ($.errorCode = "*UnauthorizedOperation") || ($.errorCode = "AccessDenied*") }'
},
{
'name' : 'Ensure a log metric filter and alarm exist for Management Console sign-in without MFA',
'description' : 'Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).',
'vulnerability' : 'Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.',
'remediation' : 'Follow the steps in the CIS Benchmark paper',
'severity' : 'info',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=92'
],
'references' : [
'AWS CIS v.1.4.0 - 4.2',
'AWS CIS v.1.2.0 - 3.2'
],
'filterPattern' : '{ ($.eventName = "ConsoleLogin") && ($.additionalEventData.MFAUsed != "Yes") }'
},
{
'name' : 'Ensure a log metric filter and alarm exist for usage of "root" account',
'description' : 'Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for root login attempts.',
'vulnerability' : 'Monitoring for root account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.',
'remediation' : 'Follow the steps in the CIS Benchmark paper',
'severity' : 'info',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=96'
],
'references' : [
'AWS CIS v.1.4.0 - 4.3',
'AWS CIS v.1.2.0 - 3.3'
],
'filterPattern' : '{ $.userIdentity.type = "Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != "AwsServiceEvent" }'
},
{
'name' : 'Ensure a log metric filter and alarm exist for IAM policy changes',
'description' : 'Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch | |
"""
Module for building and manipulating astronomical catalogues.
@author: A.Ruiz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip, range
from io import open
import os
import warnings
import tempfile
import subprocess
from copy import deepcopy
from string import ascii_uppercase
import numpy as np
from astropy import log
from astropy import units as u
from astropy.coordinates import SkyCoord
#from astropy.utils.misc import ShapedLikeNDArray
from astropy.table import Table, join, setdiff, unique, vstack
from astropy.units.quantity import Quantity
from astropy.utils.misc import indent
from astropy.utils.exceptions import AstropyUserWarning
from mocpy import MOC
# Global
ALLSKY_AREA_DEG = (4*np.pi * u.rad**2).to(u.deg**2)
class SkyCoordErr(object):
"""
A class for the positional errors of a SkyCoord object
"""
# TODO: Use ShapedLikeNDArray as base object
ERRTYPE = ['circle', 'ellipse', 'rcd_dec_ellipse',
'cov_ellipse', 'cor_ellipse']
def __init__(self, data, errtype='circle', unit=None, errsys=None, check=True):
self.errtype = self._set_errtype(errtype)
self.components = self._set_components(data, unit)
if errsys is not None:
self.add_syserr(errsys)
if check:
self._check_components()
def __repr__(self):
comp_str = ', '.join(self.components.colnames)
unit_str = ', '.join([str(col.unit) for col in self.components.itercols()])
data_str = indent(str(self.components.as_array()))
err_str = '<SkyCoordErr ({}): ({}) in {}\n{}>'
return err_str.format(self.errtype, comp_str, unit_str, data_str)
def __getitem__(self, key):
item_data = self.components[key]
if not isinstance(item_data, Table):
# We do this because when key is an integer and the components
# only have one column, components[key] returns a row
# instead of a Table.
item_data = self.components[key:key+1]
return SkyCoordErr(item_data, errtype=self.errtype, check=False)
def __len__(self):
return len(self.components)
def transform_to(self, errtype='ellipse'):
"""
Transform errors to `errtype`
"""
not_implemented_errtypes = ['rcd_dec_ellipse',
'cov_ellipse',
'cor_ellipse']
covmatrix = self.covariance_matrix()
if errtype == 'circle':
errs = self._to_circular(covmatrix)
elif errtype == 'ellipse':
errs = self._to_ellipse(covmatrix)
elif errtype in not_implemented_errtypes:
# TODO: implement remaining transformations
raise NotImplementedError
else:
raise ValueError('Unknown error type: {}'.format(errtype))
return errs
def as_array(self):
"""
Return error values as a numpy array.
"""
errs = self.components
if self.errtype == 'circle':
#err_arrays = errs.columns[0].data << errs.columns[0].unit
err_arrays = errs.columns[0].data * errs.columns[0].unit
else:
err_arrays = []
for col in errs.itercols():
#err_arrays.append(col.data << col.unit)
err_arrays.append(col.data * u.Unit(col.unit))
err_arrays = np.array(err_arrays)
return err_arrays
def covariance_matrix(self, inverse=False):
"""
Returns the corresponding covariance matrix. If `inverse` is True,
returns the inverse of the covariance matrix.
"""
sigma_x, sigma_y, rhoxy = self._covariance_components()
if inverse:
V = self._inverse_covariance_matrix(sigma_x, sigma_y, rhoxy)
else:
V = self._covariance_matrix(sigma_x, sigma_y, rhoxy)
return V
def add_syserr(self, syserr):
"""
Add a systematic error to the error components. Only works for circular errors.
"""
if self.errtype == 'circle':
data = self.components.columns[0].data
unit = self.components.columns[0].unit
err = data * u.Unit(unit)
errcol = self.components.colnames[0]
self.components[errcol] = np.sqrt(syserr**2 + err**2)
else:
raise NotImplementedError
def _set_errtype(self, errtype):
"""
Check that `errtype` is a valid value.
"""
if errtype not in self.ERRTYPE:
raise ValueError('Unknown error type: {}'.format(errtype))
else:
return errtype
def _set_components(self, data, unit=None):
"""
Define an astropy table with statistical positional errors
(no systematic errors applied here). The number of columns depends
on what kind of errors are defined
"""
if unit is None:
unit = self._get_default_units()
poserr = Table()
for col, col_unit in zip(data.colnames, unit):
if data[col].unit is None:
poserr[col] = data[col]*col_unit
else:
poserr[col] = data[col].to(col_unit)
# # Set bad values to zero
# good_mask = np.isfinite(poserr[col])
# poserr[col][~good_mask] = 0.0
#
# negative_mask = poserr[col] < 0
# poserr[col][negative_mask] = 0.0
return poserr
def _check_components(self):
"""
Check that all errors are positive and finite (not nan or inf)
"""
for i, col in enumerate(self.components.colnames):
if i >= 2:
break
if not all(np.isfinite(self.components[col])):
raise ValueError('Some positional errors are not finite!')
if not all(self.components[col] > 0):
raise ValueError('Some positional errors are non positive!')
def _get_default_units(self):
"""
Define default units depending on the error type
"""
if self.errtype == "circle":
# RADEC_ERR (e.g. 3XMM)
units = [u.arcsec]
elif self.errtype == "ellipse":
# major axis, minor axis, position angle (e.g. 2MASS)
units = [u.arcsec, u.arcsec, u.deg]
elif self.errtype == "rcd_dec_ellipse":
# ra error, dec error (e.g. SDSS)
units = [u.arcsec, u.arcsec]
elif self.errtype == "cov_ellipse":
# sigma_x, sigma_y, covariance
units = [u.arcsec, u.arcsec, u.arcsec**2]
elif self.errtype == "cor_ellipse":
# sigma_x, sigma_y, correlation
units = [u.arcsec, u.arcsec, u.arcsec/u.arcsec]
else:
raise ValueError('Wrong errtype!')
return units
def _to_ellipse(self, covmatrix):
"""
Calculate components of the ellipse error from the covariance
matrix and define a SkyCoordErr object with those components.
"""
a, b, PA = self._covariance_to_ellipse(covmatrix)
errs = Table([a, b, PA], names=['eeMaj', 'eeMin', 'eePA'])
return SkyCoordErr(errs, errtype='ellipse')
def _to_circular(self, covmatrix):
"""
Estimate equivalent circular errors from the covariance matrix
and define a SkyCoordErr object with those components.
"""
if self.errtype != 'circle':
message = ('Converting non-circular to circular errors! '
'New errors will preserve the area.')
warnings.warn(message, AstropyUserWarning)
# The determinant of the covariance matrix is related to the
# 1 sigma area covered by the positional errors: A = pi * sqrt(|V|)
# If we want a circular error that preserves the area:
# r = |V|^(1/4)
r = np.power(np.linalg.det(covmatrix), 0.25)
errs = Table([r], names=['RADEC_ERR'])
return SkyCoordErr(errs, errtype='circle')
else:
return self
def _covariance_components(self):
"""
Calculate the components of the covariance matrix from the errors
"""
npars = len(self.components.colnames)
errs = self.components
if self.errtype == "circle":
if npars != 1:
raise ValueError('Wrong error type!')
else:
sigma_x = np.array(errs.columns[0])*errs.columns[0].unit
sigma_y = np.array(errs.columns[0])*errs.columns[0].unit
rhoxy = np.zeros(len(sigma_x))*errs.columns[0].unit**2
elif self.errtype == "ellipse":
if npars != 3:
raise ValueError('Wrong error type!')
else:
err0 = np.array(errs.columns[0])*errs.columns[0].unit
err1 = np.array(errs.columns[1])*errs.columns[1].unit
err2 = np.array(errs.columns[2])*errs.columns[2].unit
sigma_x = np.sqrt((err0*np.sin(err2))**2 +
(err1*np.cos(err2))**2)
sigma_y = np.sqrt((err0*np.cos(err2))**2 +
(err1*np.sin(err2))**2)
rhoxy = np.cos(err2)*np.sin(err2)*(err0**2 - err1**2)
elif self.errtype == "rcd_dec_ellipse":
if npars != 2:
raise ValueError('Wrong error type!')
else:
sigma_x = np.array(errs.columns[0])*errs.columns[0].unit
sigma_y = np.array(errs.columns[1])*errs.columns[1].unit
rhoxy = np.zeros(len(sigma_x))*errs.columns[0].unit**2
elif self.errtype == "cov_ellipse":
if npars != 3:
raise ValueError('Wrong error type!')
else:
sigma_x = np.array(errs.columns[0])*errs.columns[0].unit
sigma_y = np.array(errs.columns[1])*errs.columns[1].unit
rhoxy = np.array(errs.columns[2])*errs.columns[2].unit
elif self.errtype == "cor_ellipse":
if npars != 3:
raise ValueError('Wrong error type!')
else:
err0 = np.array(errs.columns[0])*errs.columns[0].unit
err1 = np.array(errs.columns[1])*errs.columns[1].unit
err2 = np.array(errs.columns[2])*errs.columns[2].unit
sigma_x = err0
sigma_y = err1
rhoxy = err2*err0*err1
else:
raise ValueError('Unknown error type: {}'.format(self.errtype))
return sigma_x, sigma_y, rhoxy
@staticmethod
def _covariance_matrix(sigma_x, sigma_y, rhoxy):
"""
Calculates the covariance matrix V with
elements sigma_x, sigma_y and rhoxy.
(Eq. 6 of Pineau+2017)
"""
V = np.full((len(sigma_x), 2, 2), np.nan)
V[:, 0, 0] = sigma_x**2
V[:, 0, 1] = rhoxy
V[:, 1, 0] = rhoxy
V[:, 1, 1] = sigma_y**2
return V
@staticmethod
def _inverse_covariance_matrix(sigma_x, sigma_y, rhoxy):
"""
Calculates the inverse of the covariance matrix V with
elements sigma_x, sigma_y and rhoxy
(Eq. 7 of Pineau+2017)
"""
K = (sigma_x*sigma_y)**2 - rhoxy**2
Vinv = np.full((len(sigma_x), 2, 2), np.nan)
Vinv[:, 0, 0] = sigma_y**2/K
Vinv[:, 0, 1] = -rhoxy/K
Vinv[:, 1, 0] = -rhoxy/K
Vinv[:, 1, 1] = sigma_x**2/K
return Vinv
@staticmethod
def _covariance_to_ellipse(V):
"""
Given the covariance matrix V, returns the corresponding ellipse
error with semi-major axis a, semi-minor axis b (in arcsec)
and position angle PA (in degrees)
"""
A = V[:, 0, 0] + V[:, 1, 1] # sigma_x**2 + sigma_y**2
B = V[:, 1, 1] - V[:, 0, 0] # sigma_y**2 - sigma_x**2
C = V[:, 1, 0] # rho*sigma_x*sigma_y
a = np.sqrt((A + np.sqrt(B**2 + 4*C**2))/2)
b = np.sqrt((A - np.sqrt(B**2 + 4*C**2))/2)
PA = np.arctan2(2*C, B)/2
PA[PA < 0] += np.pi
return a, b, PA*(180/np.pi)
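# Worked example (assumed values): a circular error of r = 1 arcsec gives the covariance
# matrix V = [[1, 0], [0, 1]] arcsec^2, so A = 2, B = 0, C = 0 and _covariance_to_ellipse
# returns a = b = 1 arcsec, PA = 0 deg.
# Conversely, _to_circular keeps the 1-sigma error area constant by taking r = |V|^(1/4);
# for an ellipse with a = 2, b = 0.5 arcsec and PA = 0 this yields r = (a^2 * b^2)^(1/4) = 1 arcsec.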
class Catalogue(object):
"""
A class for catalogue objects.
Parameters
----------
data_table : Astropy ``Table`` or ``str``
Astropy ``Table`` with the catalogue data. Alternatively, the path
to a file containing the catalogue data in a format compatible with
Astropy (fits, csv, VOTable, etc) can be passed. It should contain at
least three columns: the identification labels of the sources and their
coordinates (e.g. RA and Dec).
area : ``str``, ``MOC`` or ``Quantity``
Sky area covered by the catalogue. The area can be defined as a path
to the catalogue MOC, a mocpy ``MOC`` object or an Astropy ``Quantity``
with units consistent with square deg.
name : ``str`` or ``None``, optional
Catalogue identification label. If None, it uses the file name of
`data_table`. Defaults to ``None``.
id_col : ``str`` or ``None``, optional
Name of the column in `data_table` with the identification labels. If
``None``, it assumes that the first column contains the id labels.
coord_cols : ``list``, optional
Two element list with | |
# -*- coding: utf-8 -*-
"""
Group Routes
List Groups
Count Groups
State of Group
Create Group
Get Group
Add User to Group
Remove User from Group
"""
import asyncio
import uuid
from datetime import datetime
from fastapi import APIRouter, Query, status
from fastapi.responses import JSONResponse, ORJSONResponse
from loguru import logger
from com_lib.crud_ops import execute_one_db, fetch_all_db, fetch_one_db
from com_lib.db_setup import database, groups, groups_item
from endpoints.groups.models import (
GroupCreate,
GroupItemDelete,
GroupTypeEnum,
GroupUser,
)
from endpoints.groups.validation import (
check_id_exists,
check_unique_name,
check_user_exists,
check_user_id_exists,
)
router = APIRouter()
title = "Delay in Seconds"
@router.get("/list", tags=["groups"])
async def group_list(
delay: int = Query(
None,
title=title,
description="Seconds to delay (max 121)",
ge=1,
le=121,
alias="delay",
),
qty: int = Query(
None,
title="Quanity",
description="Records to return (max 500)",
ge=1,
le=500,
alias="qty",
),
offset: int = Query(
None, title="Offset", description="Offset increment", ge=0, alias="offset"
),
is_active: bool = Query(None, title="by active status", alias="active"),
group_type: GroupTypeEnum = Query(
None, title="groupType", description="Type of group", alias="groupType"
),
group_name: str = Query(
None,
title="Group Name",
description="Get by the Group Name",
alias="groupName",
),
) -> dict:
"""[summary]
Get list of all groups and associated information
Args:
delay (int, optional): [description]. Defaults to Query( None, title=title,
description="Seconds to delay (max 121)", ge=1, le=121, alias="delay", ).
qty (int, optional): [description]. Defaults to Query( None, title="Quanity",
description="Records to return (max 500)", ge=1, le=500, alias="qty", ).
offset (int, optional): [description]. Defaults to Query( None, title="Offset",
description="Offset increment", ge=0, alias="offset" ).
is_active (bool, optional): [description]. Defaults to Query(None,
title="by active status", alias="active").
group_type (GroupTypeEnum, optional): [description]. Defaults to Query( None,
title="groupType", description="Type of group", alias="groupType" ).
Returns:
dict: [description]
GroupId, Name, Description, active state, dates created & updated
"""
criteria = []
# sleep if delay option is used
if delay is not None:
await asyncio.sleep(delay)
if qty is None:
qty: int = 100
if offset is None:
offset: int = 0
if is_active is not None:
criteria.append((groups.c.is_active, is_active, "equal"))
if group_type is not None:
criteria.append((groups.c.group_type, group_type, "equal"))
if group_name is not None:
criteria.append((groups.c.name, group_name, "ilike"))
query = groups.select().order_by(groups.c.date_create).limit(qty).offset(offset)
count_query = groups.select()
for crit in criteria:
col, val, compare_type = crit
if compare_type == "ilike":
query = query.where(col.ilike(f"%{val}%"))
else:
query = query.where(col == val)
count_query = count_query.where(col == val)
db_result = await database.fetch_all(query)
total_count = await database.fetch_all(count_query)
result = {
"parameters": {
"returned_results": len(db_result),
"qty": qty,
"total_count": len(total_count),
"offset": offset,
"filter": is_active,
"delay": delay,
},
"groups": db_result,
}
return result
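# Example request (hypothetical host/prefix):
#   GET /groups/list?qty=10&offset=0&active=true&groupType=approval
# returns a dict with a "parameters" block (counts, offset, filter, delay) and a
# "groups" list of matching rows ordered by date_create.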
@router.get("/list/count", tags=["groups"])
async def group_list_count(
delay: int = Query(
None,
title=title,
description="Seconds to delay (max 121)",
ge=1,
le=121,
alias="delay",
),
is_active: bool = Query(None, title="by active status", alias="active"),
group_type: GroupTypeEnum = Query(
None, title="groupType", description="Type of group", alias="groupType"
),
) -> dict:
"""[summary]
Get a count of groups
Args:
delay (int, optional): [description]. Defaults to Query( None,
title=title, description="Seconds to delay (max 121)", ge=1, le=121, alias="delay", ).
is_active (bool, optional): [description]. Defaults to Query(None,
title="by active status", alias="active").
group_type (GroupTypeEnum, optional): [description]. Defaults to Query( None,
title="groupType", description="Type of group", alias="groupType" ).
Returns:
dict: [description]
count based on filters
"""
criteria = []
# sleep if delay option is used
if delay is not None:
await asyncio.sleep(delay)
if is_active is not None:
criteria.append((groups.c.is_active, is_active))
if group_type is not None:
criteria.append((groups.c.group_type, group_type))
query = groups.select().order_by(groups.c.date_create)
count_query = groups.select()
for crit in criteria:
col, val = crit
query = query.where(col == val)
count_query = count_query.where(col == val)
total_count = await database.fetch_all(count_query)
result = {
"parameters": {
"total_count": len(total_count),
"filter": is_active,
"delay": delay,
},
}
return result
@router.put(
"/state",
tags=["groups"],
response_description="ID Modified",
response_class=ORJSONResponse,
status_code=201,
responses={
# 302: {"description": "Incorrect URL, redirecting"},
400: {"description": "Bad Request"},
422: {"description": "Validation Error"},
404: {"description": "Not Found"},
405: {"description": "Method not allowed"},
500: {"description": "All lines are busy, try again later."},
},
)
async def group_state(
*,
id: str = Query(..., title="group id", description="Group UUID", alias="id",),
is_active: bool = Query(
None,
title="active status",
description="true or false of status",
alias="isActive",
),
delay: int = Query(
None,
title=title,
ge=1,
le=10,
alias="delay",
description="integer delay value for simulating delays",
),
) -> dict:
"""[summary]
Active or Deactivate a Group ID
Args:
id (str, optional): [description]. Defaults to
Query(..., title="group id", description="Group UUID", alias="id",).
is_active (bool, optional): [description]. Defaults to
Query( None, title="active status", description="true or false of status", alias="isActive", ).
delay (int, optional): [description]. Defaults to
Query( None, title=title, ge=1, le=10, alias="delay",
description="integer delay value for simulating delays", ).
Returns:
dict: [id, state]
"""
# sleep if delay option is used
if delay is not None:
logger.info(f"adding a delay of {delay} seconds")
await asyncio.sleep(delay)
if is_active is None:
error: dict = {"error": f"isActive must be true or false and cannot be empty"}
logger.warning(error)
return JSONResponse(status_code=422, content=error)
id_exists = await check_id_exists(id)
if id_exists is False:
error: dict = {"error": f"Group ID: '{id}' not found"}
logger.warning(error)
return JSONResponse(status_code=404, content=error)
try:
group_data = {
"is_active": is_active,
"date_update": datetime.now(),
}
logger.debug(group_data)
# create group
query = groups.update().where(groups.c.id == id)
group_result = await execute_one_db(query=query, values=group_data)
logger.debug(str(group_result))
# if "error" in group_result:
# error: dict = group_result
# logger.critical(error)
# return JSONResponse(status_code=400, content=error)
# data result
full_result: dict = {"id": id, "status": is_active}
logger.debug(full_result)
return JSONResponse(status_code=status.HTTP_201_CREATED, content=full_result)
except Exception as e:
error: dict = {"error": str(e)}
logger.debug(e)
logger.critical(error)
return JSONResponse(status_code=400, content=error)
@router.post(
"/create",
tags=["groups"],
response_description="The created item",
response_class=ORJSONResponse,
status_code=201,
responses={
# 302: {"description": "Incorrect URL, redirecting"},
400: {"description": "Bad Request"},
422: {"description": "Validation Error"},
# 404: {"description": "Operation forbidden"},
# 405: {"description": "Method not allowed"},
500: {"description": "All lines are busy, try again later."},
},
)
async def create_group(
*,
group: GroupCreate,
delay: int = Query(None, title=title, ge=1, le=10, alias="delay",),
) -> dict:
"""[summary]
Create a new group
Args:
group (GroupCreate): [description]
delay (int, optional): [description]. Defaults to Query(None,
title=title, ge=1, le=10, alias="delay",).
Returns:
dict: [description]
Group data
"""
# sleep if delay option is used
if delay is not None:
logger.info(f"adding a delay of {delay} seconds")
await asyncio.sleep(delay)
# approval or notification
group_type_check: list = ["approval", "notification"]
if group.group_type not in group_type_check:
error: dict = {
"error": f"Group Type '{group.group_type}'\
is not 'approval' or 'notification'"
}
logger.warning(error)
return JSONResponse(status_code=400, content=error)
check_name = str(group.name)
duplicate = await check_unique_name(check_name)
try:
if duplicate is False:
error: dict = {"error": f"Group Name '{group.name}' is a duplicate"}
logger.warning(error)
return JSONResponse(status_code=400, content=error)
group_id = uuid.uuid4()
group_data = {
"id": str(group_id),
"name": group.name,
"is_active": group.is_active,
"description": group.description,
"group_type": group.group_type,
"date_create": datetime.now(),
"date_update": datetime.now(),
}
logger.debug(group_data)
# create group
query = groups.insert()
group_result = await execute_one_db(query=query, values=group_data)
# if "error" in group_result:
# error: dict = group_result
# logger.critical(error)
# return JSONResponse(status_code=400, content=error)
# data result
full_result: dict = {"id": str(group_id), "data": group_result}
logger.debug(full_result)
return JSONResponse(status_code=status.HTTP_201_CREATED, content=full_result)
except Exception as e:
error: dict = {"error": str(e)}
logger.critical(error)
return JSONResponse(status_code=400, content=error)
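# Example payload (hypothetical values; field names assumed to match the GroupCreate
# attributes used above) for POST .../create:
#   {
#     "name": "approvers-emea",
#     "description": "EMEA approval group",
#     "group_type": "approval",        # must be "approval" or "notification"
#     "is_active": true
#   }
# On success the response is {"id": "<uuid4>", "data": <insert result>} with HTTP 201.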
@router.get("/group", tags=["groups"])
async def group_id(
group_id: str = Query(
None, title="Group ID", description="Get by the Group UUID", alias="groupId",
),
group_name: str = Query(
None,
title="Group Name",
description="Get by the Group Name",
alias="groupName",
),
delay: int = Query(
None,
title=title,
description="Seconds to delay (max 121)",
ge=1,
le=121,
alias="delay",
),
) -> dict:
"""[summary]
Get individual group data, including users
Args:
group_id (str, optional): [description]. Defaults to Query( None,
title="Group ID", description="Get by the Group UUID", alias="groupId", ).
group_name (str, optional): [description]. Defaults to Query( None,
title="Group Name", description="Get by the Group Name", alias="groupName", ).
delay (int, optional): [description]. Defaults to Query( None,
title=title, description="Seconds to delay (max 121)", ge=1, le=121, alias="delay", ).
Returns:
dict: [description]
Group data and associated users
"""
# sleep if delay option is used
if delay is not None:
await asyncio.sleep(delay)
# if search by ID
if group_id is not None:
id_exists = await check_id_exists(group_id)
if id_exists is False:
error: dict = {"error": f"Group ID: '{group_id}' not found"}
logger.warning(error)
return JSONResponse(status_code=404, content=error)
# elif search by name
elif group_name is not None:
name_exists = await check_unique_name(group_name)
if name_exists is True:
error: dict = {"error": f"Group Name: '{group_name}' not found"}
logger.warning(error)
return JSONResponse(status_code=404, content=error)
query = groups.select().where(groups.c.name == group_name)
name_result = await fetch_one_db(query=query)
group_id = name_result["id"]
# else at least one needs to be selected
else:
error: dict = {"error": "groupId or groupName must be used"}
logger.warning(error)
return JSONResponse(status_code=404, content=error)
query = groups_item.select().where(groups_item.c.group_id | |
block(config['channel_label_num'], 4, 1, order=['c', 'b', 's'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
return create_and_compile_model([inputs, in_pos], out, config)
else:
return create_and_compile_model(inputs, out, config)
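# The model builders below all read the same config dict; the keys actually used in this
# file are: 'patch_size', 'channel_img_num', 'channel_label_num', 'filters',
# 'convolution_parameter', 'feed_pos' and 'pos_noise_stdv'.
# When 'feed_pos' is set, a 3-vector patch position is fed through a second input,
# optionally perturbed with Gaussian noise, upsampled to the bottleneck resolution and
# concatenated to the feature maps.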
def model_dilated_dense_net(self, config, len_dense=None, base_filter=32, param_dense_filter=None):
conv_param = config['convolution_parameter']
in_pos = None
def dense(x, f, rates, conv_param):
"""
:param x: input tensor
:param f: int, number of filters
:param rates: list of positive ints, dilation rates applied in sequence
:param conv_param: dict of convolution parameters passed to the blocks
:return: output tensor
"""
for i, rate in enumerate(rates):
conv_param['dilated_rate'] = rate
x = block(f, 3, 1, order=['b', 'r', 'c'], order_param=[None, None, conv_param])(x)
x = block(f, 3, 2, order=['b', 'r', 'c'], order_param=[None, None, conv_param])(x)
return x
if param_dense_filter is None: param_dense_filter = [1, 1]
f1, f2 = param_dense_filter[0], param_dense_filter[1]
inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp0')
dilation_rates = [[1, 1, 2, 2, 3], [1, 1, 3], [2, 1], [1, 1], [1, 1]]
if len_dense is None: len_dense = [4, 4, 4, 4, 4]
shortcuts = []
x = inputs
for rates, l in zip(dilation_rates, len_dense):
x = dense_block(l, base_filter, conv_param)(x)
shortcuts.append(x)
x = dense(x, int(round((f1 + base_filter * l) * f2)), rates, conv_param)
if config['feed_pos']:
in_pos = Input(shape=(3,), name='input_position')
pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
if config['pos_noise_stdv'] != 0: pos = GaussianNoise(config['pos_noise_stdv'])(pos)
pos = UpSampling3D(size=x.shape[1:4])(BatchNormalization()(pos))
x = Concatenate(axis=-1)([x, pos])
conv_param['dilated_rate'] = 1
for l, shortcut in reversed(list(zip(len_dense, shortcuts))):
x = dense_block(l, base_filter, conv_param)(x)
x = block(int(round((f1 + base_filter * l) * f2 / 2)), 3, 1, order=['b', 'r', 'c', 'up'],
order_param=[None, None, conv_param, None])(x)
x = Concatenate(axis=-1)([shortcut, x])
out = block(config['channel_label_num'], 4, 1, order=['c', 'b', 's'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
return create_and_compile_model([inputs, in_pos], out, config)
else:
return create_and_compile_model(inputs, out, config)
# UNet with double decoder
def model_U_net_double_decoder(self, config):
"UNet with double and parallel decoder path "
conv_param = config['convolution_parameter']
conv_param_dilated = config['convolution_parameter']
inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp1')
x = inputs
filters = [config['filters'] * 2 ** i for i in range(5)]
filters_2 = [config['filters'] // 2 * 2 ** i for i in range(5)]
skip_layer = []
for index, f in enumerate(filters):
conv_param['dilation_rate'] = 1
x = block(f, 4, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
conv_param_dilated['dilation_rate'] = 2 if index < 4 else 1
x = block(f, 4, 1, order=['c', 'b', 'r'], order_param=[conv_param_dilated, None, None])(x)
skip_layer.append(x)
x = skip_layer[-1]
if config['feed_pos']:
in_pos = Input(shape=(3,), name='input_position')
pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
if config['pos_noise_stdv'] != 0: pos = GaussianNoise(config['pos_noise_stdv'])(pos)
pos = UpSampling3D(size=x.shape[1:4])(BatchNormalization()(pos))
x = Concatenate(axis=-1)([x, pos])
x_up = 0
for index, (sk, f, f_2) in enumerate(reversed(list(zip(skip_layer[:-1], filters[:-1], filters_2[:-1])))):
x = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x_1 = x
x = tf.concat([x, sk], axis=-1)
if index == 0:
x_up = block(f_2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
else:
x_1 = block(f_2, 4, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
x_1 = tf.concat([x_1, x_up], axis=-1)
x_up = block(f_2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
x = block(filters[0] // 2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = x + x_up
out = block(config['channel_label_num'], 4, 1, order=['c', 'b', 's'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
return create_and_compile_model([inputs, in_pos], out, config)
else:
return create_and_compile_model(inputs, out, config)
# UNet with positional information handled in decoder
def model_U_net_position_decoder(self, config):
"""Network of merging the position info at each decoder """
conv_param = config['convolution_parameter']
inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp1')
x = inputs
filters = [config['filters'] * 2 ** i for i in range(5)]
skip_layer = []
for f in filters:
x = block(f, 4, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
skip_layer.append(x)
x = skip_layer[-1]
list_pos = []
if config['feed_pos']:
pos_filters = [config['filters'] * 2 ** i for i in range(4, -1, -1)]
in_pos = Input(shape=(3,), name='input_position')
pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
if config['pos_noise_stdv'] != 0: pos = GaussianNoise(config['pos_noise_stdv'])(pos)
pos = UpSampling3D(size=x.shape[1:4])(BatchNormalization()(pos))
x = Concatenate(axis=-1)([x, pos])
for f in pos_filters:
pos = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(pos)
list_pos.append(pos)
for index, (sk, f) in enumerate(reversed(list(zip(skip_layer[:-1], filters[:-1])))):
x = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
x = tf.concat([x, sk, list_pos[index]], axis=-1)
else:
x = tf.concat([x, sk], axis=-1)
x = block(filters[0], 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
out = block(config['channel_label_num'], 4, 1, order=['c', 'b', 's'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
return create_and_compile_model([inputs, in_pos], out, config)
else:
return create_and_compile_model(inputs, out, config)
# UNet with attention focusing
def model_U_net_attention(self, config):
"Experimental"
conv_param = config['convolution_parameter']
inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp1')
x = inputs
filters = [config['filters'] * 2 ** i for i in range(5)]
skip_layer = []
for f in filters:
x = block(f, 4, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
skip_layer.append(x)
x = skip_layer[-1]
in_pos = None
if config['feed_pos']:
in_pos = Input(shape=(3,), name='input_position')
pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
if config['pos_noise_stdv'] != 0: pos = GaussianNoise(config['pos_noise_stdv'])(pos)
pos = UpSampling3D(size=x.shape[1:4])(BatchNormalization()(pos))
x = Concatenate(axis=-1)([x, pos])
for sk, f in reversed(list(zip(skip_layer[:-1], filters[:-1]))):
x = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = attention_layer_1(f, conv_param=conv_param, filters=16, alpha=1)(x, sk)
x = block(filters[0], 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
out = block(config['channel_label_num'], 4, 1, order=['c', 'b', 's'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
return create_and_compile_model([inputs, in_pos], out, config)
else:
return create_and_compile_model(inputs, out, config)
def model_U_net_2double_decoder(self, config):
"""2 cascade U Net double decoder"""
conv_param = config['convolution_parameter']
conv_param_dilated = config['convolution_parameter']
inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp1')
x = inputs
filters = [config['filters'] * 2 ** i for i in range(5)]
filters_2 = [config['filters'] // 2 * 2 ** i for i in range(5)]
skip_layer = []
for index, f in enumerate(filters):
conv_param['dilation_rate'] = 1
x = block(f, 4, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
conv_param_dilated['dilation_rate'] = 2 if index < 4 else 1
x = block(f, 4, 1, order=['c', 'b', 'r'], order_param=[conv_param_dilated, None, None])(x)
skip_layer.append(x)
x = skip_layer[-1]
if config['feed_pos']:
in_pos = Input(shape=(3,), name='input_position')
pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
if config['pos_noise_stdv'] != 0: pos = GaussianNoise(config['pos_noise_stdv'])(pos)
pos = UpSampling3D(size=x.shape[1:4])(BatchNormalization()(pos))
x = Concatenate(axis=-1)([x, pos])
x_up = 0
for index, (sk, f, f_2) in enumerate(reversed(list(zip(skip_layer[:-1], filters[:-1], filters_2[:-1])))):
x = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x_1 = x
x = tf.concat([x, sk], axis=-1)
if index == 0:
x_up = block(f_2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
else:
x_1 = block(f_2, 4, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
x_1 = tf.concat([x_1, x_up], axis=-1)
x_up = block(f_2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
x = block(filters[0] // 2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = x + x_up
skip_layer = []
for index, f in enumerate(filters):
conv_param['dilation_rate'] = 1
x = block(f, 4, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
conv_param_dilated['dilation_rate'] = 2 if index < 4 else 1
x = block(f, 4, 1, order=['c', 'b', 'r'], order_param=[conv_param_dilated, None, None])(x)
skip_layer.append(x)
x = skip_layer[-1]
if config['feed_pos']:
x = Concatenate(axis=-1)([x, pos])
x_up = 0
for index, (sk, f, f_2) in enumerate(reversed(list(zip(skip_layer[:-1], filters[:-1], filters_2[:-1])))):
x = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x_1 = x
x = tf.concat([x, sk], axis=-1)
if index == 0:
x_up = block(f_2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
else:
x_1 = block(f_2, 4, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
x_1 = tf.concat([x_1, x_up], axis=-1)
x_up = block(f_2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x_1)
x = block(filters[0] // 2, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = x + x_up
out = block(config['channel_label_num'], 4, 1, order=['c', 'b', 's'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
return create_and_compile_model([inputs, in_pos], out, config)
else:
return create_and_compile_model(inputs, out, config)
def model_body_identification_hybrid(self, config):
'''
Model is built after the model of Philip Wolfs (ISS master student).
Changed the output shape and removed one dense layer at the end.
'''
inputs = Input(shape=config['patch_size'], name='input_layer')
n_base_filter = 32
reshaped = Reshape([config['patch_size'][1], config['patch_size'][2], 1])(inputs)
in_pos = Input(shape=(3,), name='input_position')
# Some convolutional layers
conv_1 = Conv2D(n_base_filter,
kernel_size=(2, 2),
padding='same',
activation='relu')(reshaped)
conv_2 = Conv2D(n_base_filter,
kernel_size=(2, 2),
padding='same',
activation='relu')(conv_1)
conv_2 = MaxPooling2D(pool_size=(3, 3),
"next-hop": li.ip.split('/')[0]
}
if preference > 0:
static_entry[
"preference"] = preference
preference += 1
srs = static_routes.setdefault(
ri_obj.service_chain_address, [])
srs.append(static_entry)
return static_routes
# end compute_pnf_static_route
def push_config(self):
if not self.config_manager:
self._logger.info(
"Plugin not found for vendor family(%s:%s), "
"ip: %s, not pushing config" % (str(self.vendor),
str(self.product),
self.management_ip))
return
if self.managed_state in ('rma', 'error', 'maintenance'):
# do not push any config to this device
self._logger.debug(
"No config push for PR(%s) in %s state" % (self.name,
self.managed_state))
return
if self.delete_config() or not self.is_vnc_managed():
return
self.config_manager.initialize()
if not self.config_manager.validate_device():
self._logger.error(
"physical router: %s, device config validation failed. "
"device configuration=%s"
% (self.uuid, str(self.config_manager.get_device_config())))
return
if self.use_ansible_plugin():
feature_configs = {}
for plugin in self.plugins:
feature_config = plugin.feature_config()
if feature_config:
feature_configs[feature_config.name] = feature_config
config_size = self.config_manager.push_conf(
feature_configs=feature_configs)
else:
config_size = self.config_manager.push_conf()
if not config_size:
return
self.set_conf_sent_state(True)
self.uve_send()
if self.config_manager.retry():
# failed commit: set repush interval up to the max value
self.config_repush_interval = min(
[2 * self.config_repush_interval,
PushConfigState.get_repush_max_interval()])
self.block_and_set_config_state(self.config_repush_interval)
else:
# successful commit: reset repush interval to base
self.config_repush_interval = PushConfigState.get_repush_interval()
if PushConfigState.get_push_delay_enable():
# sleep to enforce the computed delay between two successive commits
gevent.sleep(self.get_push_config_interval(config_size))
# end push_config
def get_push_config_interval(self, last_config_size):
config_delay = int(
(old_div(last_config_size, 1000)) *
PushConfigState.get_push_delay_per_kb())
delay = min([PushConfigState.get_push_delay_max(), config_delay])
return delay
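# Example (illustrative numbers, not configured defaults): a last_config_size
# of 250000 bytes with get_push_delay_per_kb() == 0.01 yields
# config_delay = int(250 * 0.01) = 2 seconds, further capped by
# get_push_delay_max().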
def is_service_port_id_valid(self, service_port_id):
# MX allowed ifl unit numbers for service ports are in the range 1-16384 (inclusive)
if service_port_id < 1 or service_port_id > 16384:
return False
return True
# end is_service_port_id_valid
def uve_send(self, deleted=False):
pr_trace = UvePhysicalRouterConfig(
name=self.name,
ip_address=self.management_ip,
connected_bgp_router=self.bgp_router,
auto_conf_enabled=self.vnc_managed,
product_info=str(self.vendor) + ':' + str(self.product))
if deleted:
pr_trace.deleted = True
pr_msg = UvePhysicalRouterConfigTrace(
data=pr_trace,
sandesh=DBBaseDM._sandesh)
pr_msg.send(sandesh=DBBaseDM._sandesh)
return
commit_stats = {}
if self.config_manager:
commit_stats = self.config_manager.get_commit_stats()
if self.is_vnc_managed():
pr_trace.netconf_enabled_status = True
pr_trace.last_commit_time = commit_stats.get(
'last_commit_time', '')
pr_trace.last_commit_duration = commit_stats.get(
'last_commit_duration', '0')
pr_trace.commit_status_message = commit_stats.get(
'commit_status_message', '')
pr_trace.total_commits_sent_since_up = commit_stats.get(
'total_commits_sent_since_up', 0)
else:
pr_trace.netconf_enabled_status = False
pr_msg = UvePhysicalRouterConfigTrace(
data=pr_trace, sandesh=DBBaseDM._sandesh)
pr_msg.send(sandesh=DBBaseDM._sandesh)
# end uve_send
def _is_routing_policy_supported(self, rp_obj):
if not rp_obj:
return False
return True
# end _is_routing_policy_supported
def _build_routing_policies_list(self, rp_list, rp_obj_list, rp_params,
imported):
if not rp_params:
return
keyname = 'import_routing_policy_uuid'
if imported is False:
keyname = 'export_routing_policy_uuid'
for rp_uuid in rp_params.get(keyname) or []:
rp_obj = RoutingPolicyDM.get(rp_uuid)
# only include routing policies whose protocol is supported
# in the abstract config.
if self._is_routing_policy_supported(rp_obj):
rp_name = rp_obj.name
rp_list.append(rp_name)
if rp_name not in rp_obj_list:
rp_obj_list[rp_name] = rp_obj
# end _build_routing_policies_list
def _set_proto_routing_policies_for_routed_vn(self, rp_obj_list, proto,
vn_obj, rp_params):
if not rp_params:
return
rp_imported_list = []
rp_exported_list = []
self._build_routing_policies_list(rp_imported_list, rp_obj_list,
rp_params, imported=True)
self._build_routing_policies_list(rp_exported_list, rp_obj_list,
rp_params, imported=False)
rp_param_obj = AbstractDevXsd.RoutingPolicyParameters(
import_routing_policies=rp_imported_list,
export_routing_policies=rp_exported_list)
proto.set_routing_policies(rp_param_obj)
# end _set_proto_routing_policies_for_routed_vn
def _set_routed_vn_ospf_info(self, ri, rp_obj_list, vn_obj,
routed_param):
protocols = AbstractDevXsd.RoutingInstanceProtocols()
ospf_info = routed_param.get('ospf_params', None)
bfd_info = routed_param.get('bfd_params', None)
rp_params = routed_param.get('routing_policy_params', None)
if not ospf_info:
return
ospf_name = vn_obj.name + '_ospf'
key = ''
auth_key_data = ospf_info.get('auth_data', None)
if auth_key_data:
for key_data in auth_key_data.get('key_items', []):
key = key_data.get('key', '')
if key.lower().startswith(('$9$', '$1$', '$5$', '$6$')):
key = '"%s"' % key
intf = 'irb.' + str(vn_obj.vn_network_id)
intf_type = None
if not self.is_vn_part_vpg_and_multihomed(vn_obj):
intf_type = 'p2p'
ospf_obj = AbstractDevXsd.Ospf(name=ospf_name,
authentication_key=key,
interface=intf,
interface_type=intf_type,
hello_interval=ospf_info.get(
'hello_interval'),
dead_interval=ospf_info.get(
'dead_interval'),
area_id=ospf_info.get('area_id'),
area_type=ospf_info.get('area_type'),
advertise_loopback=ospf_info.get(
'advertise_loopback'),
orignate_summary_lsa=ospf_info.get(
'orignate_summary_lsa'))
ospf_obj.set_comment('Routed VN OSPF info')
if bfd_info:
bfd = AbstractDevXsd.Bfd(rx_tx_interval=bfd_info.get(
'time_interval'),
detection_time_multiplier=bfd_info.get(
'detection_time_multiplier'))
ospf_obj.set_bfd(bfd)
self._set_proto_routing_policies_for_routed_vn(rp_obj_list, ospf_obj,
vn_obj, rp_params)
protocols.add_ospf(ospf_obj)
ri.add_protocols(protocols)
# end _set_routed_vn_ospf_info
def _set_internal_vn_routed_bgp_info(self, ri, rp_obj_list, vn_obj,
routed_param):
protocols = AbstractDevXsd.RoutingInstanceProtocols()
bfd_info = routed_param.get('bfd_params', None)
bgp_info = routed_param.get('bgp_params', None)
rp_params = routed_param.get('routing_policy_params', None)
if not bgp_info:
return
bgp_name = vn_obj.name + '_bgp'
key = ''
auth_key_data = bgp_info.get('auth_data', None)
if auth_key_data:
for key_data in auth_key_data.get('key_items', []):
key = key_data.get('key', '')
if key.lower().startswith(('$9$', '$1$', '$5$', '$6$')):
key = '"%s"' % key
local_asnv = bgp_info.get(
'local_autonomous_system', None) or self.brownfield_global_asn
bgp = AbstractDevXsd.Bgp(name=bgp_name,
type_="external",
autonomous_system=local_asnv,
authentication_key=key,
multihop=AbstractDevXsd.MultiHop(
ttl=bgp_info.get('multihop_ttl', None)))
if routed_param.get('loopback_ip_address', None) is not None:
bgp.set_ip_address(routed_param.get('loopback_ip_address'))
# This is for backward compatibility: from release 2005 onwards
# peer_ip_address_list is expected, and if DM receives both,
# peer_ip_address_list is given priority.
if bgp_info.get('peer_ip_address_list', None):
for peer_ip in bgp_info.get('peer_ip_address_list') or []:
peer_bgp = AbstractDevXsd.Bgp(name=peer_ip,
autonomous_system=bgp_info.get(
'peer_autonomous_system'),
ip_address=peer_ip)
bgp.add_peers(peer_bgp)
else:
peer_ip = bgp_info.get('peer_ip_address')
peer_bgp = AbstractDevXsd.Bgp(name=peer_ip,
autonomous_system=bgp_info.get(
'peer_autonomous_system'),
ip_address=peer_ip)
bgp.add_peers(peer_bgp)
bgp.set_comment('Routed VN BGP info')
if bfd_info:
bfd = AbstractDevXsd.Bfd(
rx_tx_interval=bfd_info.get('time_interval'),
detection_time_multiplier=bfd_info.get(
'detection_time_multiplier'))
bgp.set_bfd(bfd)
self._set_proto_routing_policies_for_routed_vn(rp_obj_list, bgp,
vn_obj, rp_params)
protocols.add_bgp(bgp)
ri.add_protocols(protocols)
# end _set_internal_vn_routed_bgp_info
def _set_routed_vn_static_route_info(self, ri, vn_obj, routed_param):
static_route = routed_param.get('static_route_params', None)
bfd_info = routed_param.get('bfd_params', None)
if static_route is None:
return
irt_uuid = static_route.get('interface_route_table_uuid', None)
ip_prefix = set()
for irt in irt_uuid or []:
irt_obj = InterfaceRouteTableDM.get(irt)
if irt_obj:
for prefix in irt_obj.prefix.keys():
ip_prefix.add(prefix)
for ip in ip_prefix:
route = AbstractDevXsd.Route(
prefix=ip, prefix_len=32,
next_hop=static_route.get('next_hop_ip_address')[-1],
comment='Routed VN static route')
if bfd_info:
bfd = AbstractDevXsd.Bfd(
rx_tx_interval=bfd_info.get('time_interval'),
detection_time_multiplier=bfd_info.get(
'detection_time_multiplier'))
route.set_bfd(bfd)
ri.add_static_routes(route)
# end _set_routed_vn_static_route_info
def is_vn_part_vpg_and_multihomed(self, vn_obj):
vmi_list = vn_obj.virtual_machine_interfaces
for vmi_uuid in vmi_list or []:
vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
if not vmi or not vmi.virtual_port_group:
continue
vpg = VirtualPortGroupDM(vmi.virtual_port_group)
if vpg and vpg.pi_ae_map is not None:
for pr_uuid in vpg.physical_interfaces or []:
if pr_uuid not in self.physical_interfaces:
return True
return False
# end is_vn_part_vpg_and_multihomed
def get_bd_li_map(self, vn_obj):
vn_dict = {}
bd_name = "bd-" + str(vn_obj.vn_network_id)
vmi_list = vn_obj.virtual_machine_interfaces
for vmi_uuid in vmi_list or []:
vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
# skip VMIs that are missing or have no VPG before dereferencing them
if not vmi or not vmi.virtual_port_group:
continue
if self.fabric_obj.enterprise_style:
vlan_tag = 0
else:
vlan_tag = vmi.vlan_tag
vpg = VirtualPortGroupDM(vmi.virtual_port_group)
if vpg:
for pi_uuid in vpg.physical_interfaces or []:
pi = PhysicalInterfaceDM(pi_uuid, None)
if pi and pi.physical_router == self.uuid:
ae_id = vpg.pi_ae_map.get(pi_uuid)
if ae_id is not None and vlan_tag is not None:
ae_name = "ae" + str(ae_id) + "." + str(vlan_tag)
vn_dict.setdefault(bd_name, []).append(ae_name)
else:
li_name = pi.name + "." + str(vlan_tag)
vn_dict.setdefault(bd_name, []).append(li_name)
return vn_dict
def _set_routed_vn_pim_info(self, ri, vn_obj, routed_param, rproto):
rp = AbstractDevXsd.RoutingProtocol()
protocols = AbstractDevXsd.RoutingInstanceProtocols()
pim_params = routed_param.get('pim_params', None)
if not pim_params:
return
rp_ip = pim_params.get('rp_ip_address', None)
pim_mode = pim_params.get('mode', None)
flag_eoai = pim_params.get('enable_all_interfaces', False)
bfd_info = routed_param.get('bfd_params', None)
pim_intf = None
if not flag_eoai:
intf = 'irb.' + str(vn_obj.vn_network_id)
else:
intf = 'all'
pim_intf = [AbstractDevXsd.PimInterface(
interface=AbstractDevXsd.Reference(intf))]
# Create IGMP abstract config
igmp_name = "igmp-" + str(vn_obj.vn_network_id)
igmp_interface = [AbstractDevXsd.Reference(intf)]
igmp = AbstractDevXsd.Igmp(name=igmp_name,
comment='Routed VN IGMP config',
interfaces=igmp_interface)
rp.add_igmp(igmp)
pim_obj = AbstractDevXsd.Pim(rp=rp_ip,
mode=pim_mode,
pim_interfaces=pim_intf,
enable_on_all_interfaces=flag_eoai)
pim_obj.set_comment('Routed VN PIM info')
if bfd_info:
bfd = AbstractDevXsd.Bfd(
rx_tx_interval=bfd_info.get('time_interval'),
detection_time_multiplier=bfd_info.get(
'detection_time_multiplier'))
pim_obj.set_bfd(bfd)
protocols.add_pim(pim_obj)
ri.add_protocols(protocols)
# Create IGMP Snooping Config
bd_li_map = self.get_bd_li_map(vn_obj)
if len(bd_li_map) > 0:
igs_name = "igmp-snoop-" + str(vn_obj.vn_network_id)
igs_comment = "Routed VN IGMP SNOOPING"
igs = AbstractDevXsd.IgmpSnooping(name=igs_name,
comment=igs_comment)
for bd in bd_li_map.keys():
vlan_name = bd
vlan = AbstractDevXsd.Vlan(name=vlan_name)
intf_lst = vlan.get_interfaces()
for intf in bd_li_map[bd]:
if not any(v.get_name() == intf for v in intf_lst):
intf_lst.append(AbstractDevXsd.Reference(name=intf))
if not any(v.get_name() == vlan_name for v in igs.get_vlans()):
igs.get_vlans().append(vlan)
rp.add_igmp_snooping(igs)
rproto.append(rp)
# end _set_routed_vn_pim_info
def set_routing_vn_proto_in_ri(self, ri, rp, vn_list,
is_loopback_vn=False, lr_uuid=None,
rproto=None):
# avoid a shared mutable default argument
if rproto is None:
rproto = []
rp_obj_list = {}
for vn in vn_list or []:
vn_obj = VirtualNetworkDM.get(vn)
if vn_obj is None or vn_obj.virtual_network_category != 'routed':
continue
for route_param in vn_obj.routed_properties or []:
if self.uuid == route_param.get('physical_router_uuid'):
if is_loopback_vn:
if route_param.get('logical_router_uuid', None) !=\
lr_uuid:
continue
if route_param.get('routing_protocol') == 'bgp':
self._set_internal_vn_routed_bgp_info(ri, rp_obj_list,
vn_obj,
route_param)
elif route_param.get('routing_protocol')\
== 'static-routes':
self._set_routed_vn_static_route_info(ri, vn_obj,
route_param)
elif route_param.get('routing_protocol') \
== 'ospf':
self._set_routed_vn_ospf_info(ri, rp_obj_list, vn_obj,
route_param)
elif route_param.get('routing_protocol') == 'pim':
self._set_routed_vn_pim_info(ri, vn_obj,
route_param, rproto)
RoutingPolicyDM.create_abstract_routing_policies(rp, rp_obj_list)
# end set_routing_vn_proto_in_ri
# end PhysicalRouterDM
class GlobalVRouterConfigDM(DBBaseDM):
_dict = {}
obj_type = 'global_vrouter_config'
global_vxlan_id_mode = None
global_forwarding_mode = None
global_encapsulation_priorities = []
global_encapsulation_priority = None
def __init__(self, uuid, obj_dict=None):
"""Global VRouter Config Object"""
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
new_global_vxlan_id_mode = obj.get('vxlan_network_identifier_mode')
new_global_encapsulation_priority = None
new_global_encapsulation_priorities = []
encapsulation_priorities = obj.get('encapsulation_priorities')
if encapsulation_priorities:
new_global_encapsulation_priorities = encapsulation_priorities.get(
"encapsulation")
if new_global_encapsulation_priorities:
new_global_encapsulation_priority = \
new_global_encapsulation_priorities[0]
new_global_forwarding_mode = obj.get('forwarding_mode')
if (GlobalVRouterConfigDM.global_vxlan_id_mode !=
new_global_vxlan_id_mode or
GlobalVRouterConfigDM.global_forwarding_mode !=
new_global_forwarding_mode or
GlobalVRouterConfigDM.global_encapsulation_priorities !=
new_global_encapsulation_priorities or
GlobalVRouterConfigDM.global_encapsulation_priority !=
new_global_encapsulation_priority):
GlobalVRouterConfigDM.global_vxlan_id_mode = \
new_global_vxlan_id_mode
GlobalVRouterConfigDM.global_forwarding_mode = \
new_global_forwarding_mode
GlobalVRouterConfigDM.global_encapsulation_priorities = \
new_global_encapsulation_priorities
GlobalVRouterConfigDM.global_encapsulation_priority = \
new_global_encapsulation_priority
self.update_physical_routers()
# end update
def update_physical_routers(self):
for pr in list(PhysicalRouterDM.values()):
pr.set_config_state()
# end update_physical_routers
@classmethod
def is_global_vxlan_id_mode_auto(cls):
if (cls.global_vxlan_id_mode is not None and
cls.global_vxlan_id_mode == 'automatic'):
return True
return False
# end GlobalVRouterConfigDM
class GlobalSystemConfigDM(DBBaseDM):
_dict = {}
obj_type = 'global_system_config'
global_asn =
import json
from importlib import import_module
import logging
from pathlib import Path
import pkg_resources
import shutil
import sys
import tarfile
from furl import furl
import requests
from semver import max_satisfying
from django.apps import apps
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible
)
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.six import PY3
from django.utils.termcolors import colorize
from django.utils.translation import ugettext_lazy as _, ugettext
from mayan.apps.common.compat import FileNotFoundErrorException
from mayan.apps.common.utils import resolve_attribute
from mayan.apps.storage.utils import mkdtemp, patch_files as storage_patch_files
from .algorithms import HashAlgorithm
from .environments import environment_production
from .exceptions import DependenciesException
logger = logging.getLogger(name=__name__)
class Provider(object):
"""Base provider class"""
class PyPIRespository(Provider):
url = 'https://pypi.org/'
class GoogleFontsProvider(Provider):
url = 'https://fonts.googleapis.com/'
class NPMRegistryRespository(Provider):
url = 'http://registry.npmjs.com'
class OperatingSystemProvider(Provider):
"""Placeholder for the OS provider"""
@python_2_unicode_compatible
class DependencyGroup(object):
_registry = {}
@classmethod
def get(cls, name):
return cls._registry[name]
@classmethod
def get_all(cls):
return sorted(cls._registry.values(), key=lambda x: x.label)
def __init__(self, attribute_name, label, name, help_text=None):
self.attribute_name = attribute_name
self.label = label
self.help_text = help_text
self.name = name
self.__class__._registry[name] = self
def __str__(self):
return force_text(self.label)
def get_entries(self):
results = Dependency.get_values_of_attribute(
attribute_name=self.attribute_name
)
result = []
for entry in results:
result.append(
DependencyGroupEntry(
dependency_group=self, help_text=entry['help_text'],
label=entry['label'], name=entry['value']
)
)
return sorted(result, key=lambda x: x.label)
def get_entry(self, entry_name):
for entry in self.get_entries():
if entry.name == entry_name:
return entry
raise KeyError('Entry not found.')
@python_2_unicode_compatible
class DependencyGroupEntry(object):
def __init__(self, dependency_group, label, name, help_text=None):
self.dependency_group = dependency_group
self.help_text = help_text or ''
self.label = label
self.name = name
def __str__(self):
return force_text(self.label)
def get_dependencies(self):
dependencies = Dependency.get_for_attribute(
attribute_name=self.dependency_group.attribute_name,
attribute_value=self.name
)
return Dependency.return_sorted(dependencies=dependencies)
class Dependency(object):
_registry = {}
@staticmethod
def initialize():
for app in apps.get_app_configs():
try:
import_module('{}.dependencies'.format(app.name))
except ImportError as exception:
if force_text(exception) not in ('No module named dependencies', 'No module named \'{}.dependencies\''.format(app.name)):
logger.error(
'Error importing %s dependencies.py file; %s', app.name,
exception
)
@staticmethod
def return_sorted(dependencies):
return sorted(dependencies, key=lambda x: x.get_label())
@classmethod
def check_all(cls, as_csv=False, use_color=False):
if as_csv:
template = '{},{},{},{},{},{},{}'
else:
template = '{:<35}{:<11} {:<15} {:<20} {:<15} {:<30} {:<10}'
if not as_csv:
print('\n ', end='')
print(
template.format(
ugettext('Name'), ugettext('Type'), ugettext('Version'),
ugettext('App'), ugettext('Environment'),
ugettext('Other data'), ugettext('Check')
)
)
if not as_csv:
print('-' * 140)
for dependency in cls.get_all():
check = dependency.check()
if not as_csv and not check and dependency.environment.mark_missing:
check = '* {} *'.format(check)
if use_color:
check = colorize(
text=check, fg='red', opts=('bold', 'blink', 'reverse')
)
if not as_csv:
print('* ', end='')
print(
template.format(
dependency.name,
force_text(dependency.class_name_verbose_name),
force_text(dependency.get_version_string()),
force_text(dependency.app_label_verbose_name()),
force_text(dependency.get_environment_verbose_name()),
force_text(dependency.get_other_data()),
force_text(check),
)
)
sys.stdout.flush()
@classmethod
def get(cls, pk):
return cls._registry[pk]
@classmethod
def get_all(cls, subclass_only=False):
dependencies = cls._registry.values()
if subclass_only:
dependencies = [dependency for dependency in dependencies if isinstance(dependency, cls)]
return Dependency.return_sorted(dependencies=dependencies)
@classmethod
def get_for_attribute(cls, attribute_name, attribute_value, **kwargs):
result = []
for dependency in cls.get_all(**kwargs):
if resolve_attribute(attribute=attribute_name, obj=dependency) == attribute_value:
result.append(dependency)
return result
@classmethod
def get_values_of_attribute(cls, attribute_name):
result = []
for dependency in cls.get_all():
value = resolve_attribute(attribute=attribute_name, obj=dependency)
try:
label = resolve_attribute(
attribute='{}_verbose_name'.format(attribute_name),
obj=dependency
)
except AttributeError:
label = value
try:
help_text = resolve_attribute(
attribute='{}_help_text'.format(attribute_name),
obj=dependency
)
except AttributeError:
help_text = None
dictionary = {'label': label, 'help_text': help_text, 'value': value}
if dictionary not in result:
result.append(dictionary)
return result
@classmethod
def install_multiple(cls, app_label=None, force=False, subclass_only=False):
for dependency in cls.get_all(subclass_only=subclass_only):
if app_label:
if app_label == dependency.app_label:
dependency.install(force=force)
else:
dependency.install(force=force)
def __init__(
self, name, app_label=None, copyright_text=None, help_text=None,
environment=environment_production, label=None, module=None,
replace_list=None, version_string=None
):
self._app_label = app_label
self.copyright_text = copyright_text
self.environment = environment
self.help_text = help_text
self.label = label
self.module = module
self.name = name
self.package_metadata = None
self.replace_list = replace_list
self.repository = self.provider_class()
self.version_string = version_string
if not app_label:
if not module:
raise DependenciesException(
_('Need to specify at least one: app_label or module.')
)
if self.get_pk() in self.__class__._registry:
raise DependenciesException(
_('Dependency "%s" already registered.') % self.name
)
self.__class__._registry[self.get_pk()] = self
@cached_property
def app_label(self):
if not self._app_label:
app = apps.get_containing_app_config(object_name=self.module)
return app.label
else:
return self._app_label
def app_label_verbose_name(self):
return apps.get_app_config(app_label=self.app_label).verbose_name
def download(self):
"""
Download the dependency from a repository
"""
raise NotImplementedError
def get_copyright(self):
return self.copyright_text or ''
def install(self, force=False):
print(_('Installing package: %s... ') % self.get_label_full(), end='')
sys.stdout.flush()
if not force:
if self.check():
print(_('Already installed.'))
else:
self._install()
print(_('Complete.'))
sys.stdout.flush()
else:
# forced install: install and re-apply file patches once
self._install()
if self.replace_list:
self.patch_files()
print(_('Complete.'))
sys.stdout.flush()
def _install(self):
raise NotImplementedError
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.name)
def check(self):
"""
Returns the version found or an exception
"""
if self._check():
return True
else:
return False
def check_string(self):
if self._check():
return 'True'
else:
return 'False'
def check_string_verbose_name(self):
if self._check():
return _('Installed and correct version')
else:
return _('Missing or incorrect version')
def _check(self):
raise NotImplementedError
def get_help_text(self):
return self.help_text or ''
def get_environment(self):
return self.environment.name
def get_environment_help_text(self):
return self.environment.help_text
def get_environment_verbose_name(self):
return self.environment.label
def get_label(self):
return self.label or self.name
def get_label_full(self):
if self.version_string:
version_string = '({})'.format(self.version_string)
else:
version_string = ''
return '{} {}'.format(self.get_label(), version_string)
def get_other_data(self):
return _('None')
def get_pk(self):
return self.name
def get_url(self):
raise NotImplementedError
def get_version_string(self):
return self.version_string or _('Not specified')
def patch_files(self, path=None, replace_list=None):
print(_('Patching files... '), end='')
try:
sys.stdout.flush()
except AttributeError:
pass
if not path:
path = self.get_install_path()
if not replace_list:
replace_list = self.replace_list
storage_patch_files(path=path, replace_list=replace_list)
def verify(self):
"""
Verify the integrity of the dependency
"""
raise NotImplementedError
# Dependency subclasses
class BinaryDependency(Dependency):
class_name = 'binary'
class_name_help_text = _(
'Executables that are called directly by the code.'
)
class_name_verbose_name = _('Binary')
provider_class = OperatingSystemProvider
def __init__(self, *args, **kwargs):
self.path = kwargs.pop('path')
super(BinaryDependency, self).__init__(*args, **kwargs)
def _check(self):
return Path(self.path).exists()
def get_other_data(self):
return 'Path: {}'.format(self.path)
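# Usage sketch: an app's dependencies.py module (imported by
# Dependency.initialize() above) registers a dependency simply by
# instantiating one of these subclasses. The label, name and path below are
# illustrative assumptions, not real project entries:
#
#     BinaryDependency(
#         label='ExifTool', module=__name__, name='exiftool',
#         path='/usr/bin/exiftool'
#     )
#
# Instantiation stores the object in Dependency._registry keyed by get_pk().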
class JavaScriptDependency(Dependency):
class_name = 'javascript'
class_name_help_text = _(
'JavaScript libraries downloaded from the NPM registry and used for '
'front-end functionality.'
)
class_name_verbose_name = _('JavaScript')
provider_class = NPMRegistryRespository
def __init__(self, *args, **kwargs):
self.static_folder = kwargs.pop('static_folder', None)
return super(JavaScriptDependency, self).__init__(*args, **kwargs)
def _check(self):
try:
package_info = self._read_package_file()
except FileNotFoundErrorException:
return False
if PY3:
versions = [package_info['version']]
version_string = self.version_string
else:
versions = [force_bytes(package_info['version'])]
version_string = force_bytes(self.version_string)
return max_satisfying(
versions=versions, range_=version_string,
loose=True
)
def _read_package_file(self):
path_install_path = self.get_install_path()
path_package = path_install_path / 'package.json'
with path_package.open(mode='rb') as file_object:
return json.load(file_object)
def _install(self, include_dependencies=False):
self.get_metadata()
print(_('Downloading... '), end='')
sys.stdout.flush()
self.download()
print(_('Verifying... '), end='')
sys.stdout.flush()
self.verify()
print(_('Extracting... '), end='')
sys.stdout.flush()
self.extract()
if include_dependencies:
for name, version_string in self.version_metadata.get('dependencies', {}).items():
dependency = JavaScriptDependency(
name=name, version_string=version_string
)
dependency.install(include_dependencies=False)
def extract(self, replace_list=None):
temporary_directory = mkdtemp()
path_compressed_file = self.get_tar_file_path()
with tarfile.open(name=force_text(path_compressed_file), mode='r') as file_object:
file_object.extractall(path=temporary_directory)
self.patch_files(path=temporary_directory, replace_list=replace_list)
path_install = self.get_install_path()
# Clear the installation path of previous content
shutil.rmtree(path=force_text(path_install), ignore_errors=True)
# Scoped packages are nested under a parent directory
# create it to avoid rename errors.
path_install.mkdir(parents=True)
# Copy the content under the dependency's extracted content folder
# 'package' to the final location.
# We do a copy and delete instead of move because os.rename doesn't
# support renames across filesystems.
path_uncompressed_package = Path(temporary_directory, 'package')
shutil.rmtree(force_text(path_install))
shutil.copytree(
force_text(path_uncompressed_package), force_text(path_install)
)
shutil.rmtree(force_text(path_uncompressed_package))
# Clean up temporary directory used for download
shutil.rmtree(path=temporary_directory, ignore_errors=True)
shutil.rmtree(path=self.path_cache, ignore_errors=True)
def download(self):
self.path_cache = mkdtemp()
with requests.get(self.version_metadata['dist']['tarball'], stream=True) as response:
response.raise_for_status()
with self.get_tar_file_path().open(mode='wb') as file_object:
shutil.copyfileobj(fsrc=response.raw, fdst=file_object)
def get_best_version(self):
# PY3
# node-semver does a direct str() comparison which means
# different things on PY2 and PY3
# Typecast to str in PY3 which is unicode and
# bytes in PY2 which is str to fool node-semver
if PY3:
versions = self.versions
version_string = self.version_string
else:
versions = [force_bytes(version) for version in self.versions]
version_string = force_bytes(self.version_string)
return max_satisfying(
versions=versions, range_=version_string, loose=True
)
def get_copyright(self):
path_install_path = self.get_install_path()
for entry in path_install_path.glob(pattern='LICENSE*'):
with entry.open(mode='rb') as file_object:
return force_text(file_object.read())
copyright_text = []
try:
package_info = self._read_package_file()
except FileNotFoundErrorException:
return super(JavaScriptDependency, self).get_copyright()
else:
copyright_text.append(
package_info.get('license') or package_info.get(
'licenses'
)[0]['type']
)
author = package_info.get('author', {})
try:
author = author.get('name')
except AttributeError:
pass
copyright_text.append(author or '')
return '\n'.join(copyright_text)
def get_help_text(self):
description = None
try:
description = self._read_package_file().get('description')
except FileNotFoundErrorException:
return super(JavaScriptDependency, self).get_help_text()
else:
return description
def get_install_path(self):
app = apps.get_app_config(app_label=self.app_label)
result = Path(
app.path, 'static', self.static_folder or app.label,
'node_modules', self.name
)
return result
def get_metadata(self):
response = requests.get(url=self.get_url())
self.package_metadata = response.json()
self.versions = self.package_metadata['versions'].keys()
self.version_best = self.get_best_version()
try:
self.version_metadata = self.package_metadata['versions'][
self.version_best
]
except KeyError:
raise DependenciesException(
'Best version for dependency %s is not found in '
'upstream repository.' % self.version_best
)
def get_tar_file_path(self):
return Path(
self.path_cache, self.get_tar_filename()
)
def get_tar_filename(self):
return furl(
self.version_metadata['dist']['tarball']
).path.segments[-1]
def get_url(self):
url = furl(self.repository.url)
url.path.segments = url.path.segments + [self.name]
return url.tostr()
def verify(self):
path_tar_file = self.get_tar_file_path()
try:
integrity =
= "meta" in prefixes
clazz.broadcast = "broadcast" in prefixes
clazz.urgent = "urgent" in prefixes
return prefixes, clazz
def bounded_int_type(evaluator, ast, state):
"""Evaluates "int[a,b]"."""
uppaal_clazz_name = f'bounded_int' # f'Uppaal_bounded_int'
lower_val = evaluator.eval_ast(ast["lower"], state)
upper_val = evaluator.eval_ast(ast["upper"], state)
new_clazz = UppaalBoundedInt.make_new_type(name=uppaal_clazz_name, bounds=(lower_val, upper_val))
return new_clazz
def scalar_type(evaluator, ast, state):
"""Evaluates "scalar[n]"."""
uppaal_clazz_name = f'scalar' # f'Uppaal_scalar'
size = evaluator.eval_ast(ast["expr"], state)
new_clazz = UppaalScalar.make_new_type(name=uppaal_clazz_name, size=size)
return new_clazz
def struct_type(evaluator, ast, state):
"""Evaluates "struct {...}"."""
uppaal_clazz_name = f'struct' # f'Uppaal_struct'
field_classes = []
for field_ast in ast["fields"]:
field_vars_data = evaluator.eval_ast(field_ast, state)
field_classes.extend(field_vars_data)
new_clazz = UppaalStruct.make_new_type(name=uppaal_clazz_name, field_classes=field_classes)
return new_clazz
def custom_type(_evaluator, ast, state):
"""Evaluates int, bool, ... type."""
custom_type_name = ast["type"]
base_clazz_name = f'{custom_type_name}' # f'Uppaal_{custom_type_name}'
base_clazz = state.get(base_clazz_name)
# TODO: Implement another concept to handle type quantifiers (so that copying classes each time is not required)
# new_clazz = base_clazz.make_new_type(name=base_clazz_name)
new_clazz = type(base_clazz_name, (base_clazz,), {})
return new_clazz # base_clazz
def field_decl(evaluator, ast, state):
"""Evaluates field declaration for struct type."""
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
variables = []
for var in ast["varData"]:
variable_id_data = evaluator.eval_ast(var, state)
var_name = variable_id_data["varName"]
array_dims = variable_id_data["arrayDecl"]
# wrap in an array type per variable; do not overwrite clazz, otherwise the
# array type would leak into the remaining (non-array) variables of the field
var_clazz = clazz
if len(array_dims) > 0:
var_clazz = UppaalArray.make_new_type(name="array", dims=array_dims, clazz=clazz)
variables.append((var_name, var_clazz))
return variables
###
def function_def(evaluator, ast, state):
"""Evaluates "type func_name(....) { ... }"."""
func_name = ast["name"]
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
func_obj = UppaalFunction(func_name, ast, clazz, evaluator)
state.add(func_name, func_obj, const=True)
def statement_block(evaluator, ast, state):
"""Evaluates statement block "{ ... }"."""
state.new_local_scope()
for decl in ast["decls"]:
evaluator.eval_ast(decl, state)
for stmt in ast["stmts"]:
res, do_return = evaluator.eval_ast(stmt, state)
if do_return:
state.remove_local_scope()
return res, True
state.remove_local_scope()
return None, False
def empty_statement(_evaluator, _ast, _state):
"""Evaluates empty statement."""
return None, False
def expr_statement(evaluator, ast, state):
"""Evaluates "expr;"."""
res = evaluator.eval_ast(ast["expr"], state)
return res, False
def for_loop(evaluator, ast, state):
"""Evaluates "for (init; cond; after) {body}"."""
evaluator.eval_ast(ast["init"], state)
while evaluator.eval_ast(ast["cond"], state):
res, do_return = evaluator.eval_ast(ast["body"], state)
if do_return:
return res, True
evaluator.eval_ast(ast["after"], state)
return None, False
def iteration(evaluator, ast, state):
"""Evaluates "for (name : type) {body}"."""
var_name = ast["name"]
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
state.new_local_scope()
state.define(var_name, clazz)
for val in clazz:
state.assign(var_name, val)
res, do_return = evaluator.eval_ast(ast["body"], state)
if do_return:
state.remove_local_scope()
return res, True
state.remove_local_scope()
return None, False
def while_loop(evaluator, ast, state):
"""Evaluates "while (cond) {body}"."""
while evaluator.eval_ast(ast["cond"], state):
res, do_return = evaluator.eval_ast(ast["body"], state)
if do_return:
return res, True
return None, False
def do_while_loop(evaluator, ast, state):
"""Evaluates "do {body} while (cond)"."""
while True:
res, do_return = evaluator.eval_ast(ast["body"], state)
if do_return:
return res, True
if not evaluator.eval_ast(ast["cond"], state):
break
return None, False
def if_statement(evaluator, ast, state):
"""Evaluates "if (cond) {thenBody} [else {elseBody}]"."""
if evaluator.eval_ast(ast["cond"], state):
res, do_return = evaluator.eval_ast(ast["thenBody"], state)
if do_return:
return res, True
else:
if ast.get("elseBody"):
res, do_return = evaluator.eval_ast(ast["elseBody"], state)
if do_return:
return res, True
return None, False
def return_statement(evaluator, ast, state):
"""Evaluates "return expr;"."""
if ast.get("expr"):
res = evaluator.eval_ast(ast["expr"], state)
else:
res = None
return res, True # Second entry represents "do_return" state, and propagates upwards through statements
###
def parameter(evaluator, ast, state):
"""Evaluates a function parameter."""
var_ast = ast["varData"]
var_id_data = evaluator.eval_ast(var_ast, state)
var_name = var_id_data["varName"]
if ast["isRef"] == '&': # If parameter is a reference
state.add(var_name, None)
else: # If parameter is NOT a reference
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
var_clazz = clazz
new_val = var_clazz()
new_var = UppaalVariable(name=var_name, val=new_val)
state.add(var_name, new_var, const=("const" in prefixes or "meta" in prefixes))
def system(_evaluator, ast, _state):
"""Evaluates the instance system initialization."""
return ast["processNames"] # TODO: Implementation
def process(_evaluator, ast, _state):
"""Evaluates a single process."""
return ast # TODO: Implementation
def instantiation(evaluator, ast, state):
"""Evaluates "Inst(params) = Tmpl(args)"."""
instance_name = ast["instanceName"]
if ast.get("params"):
params = list(map(lambda param: evaluator.eval_ast(param, state), ast["params"]))
else:
params = []
template_name = ast["templateName"]
args = ast["args"] # list(map(lambda arg: evaluator.eval_ast(arg, state), ast["args"]))
return {"instance_name": instance_name, "params": params, "template_name": template_name,
"args": args}
# def progress_decl(_evaluator, _ast, _state):
# """Evaluates progress declaration."""
# return # TODO: Implementation
###
# def gantt_decl(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_def(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_args(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_entry_elem(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_decl_select(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_expr_list(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_expr_single(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_expr_select(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def gantt_entry_select(_evaluator, _ast, _state):
# return # TODO: Implementation
###
# def chan_priority(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def chan_expr(_evaluator, _ast, _state):
# return # TODO: Implementation
#
#
# def chan_default(_evaluator, _ast, _state):
# return # TODO: Implementation
###
def variable(_evaluator, ast, state):
"""Evaluates a variable in an expression."""
name_str = ast["name"]
var = state.get(name_str)
if isinstance(var.val, UppaalReference):
var = var.val.pointee
return var
def integer(_evaluator, ast, _state):
"""Evaluates an integer value."""
return UppaalInt(ast["val"])
def double(_evaluator, _ast, _state):
"""Evaluates a double value."""
return # TODO: Uppaal_double(float(ast["val"]))
def boolean(_evaluator, ast, _state):
"""Evaluates a boolean value."""
return UppaalBool(ast["val"])
###
def bracket_expr(evaluator, ast, state):
"""Evaluates expression "(expr)"."""
res = evaluator.eval_ast(ast["expr"], state)
return res
def derivative_expr(_evaluator, _ast, _state):
"""Evaluates expression "expr'"."""
return # TODO: Implementation
def post_incr_assign_expr(evaluator, ast, state):
"""Evaluates expression "expr++"."""
var = evaluator.eval_ast(ast["expr"], state)
ret = var.val.copy()
var += 1
return ret
def post_decr_assign_expr(evaluator, ast, state):
"""Evaluates expression "expr--"."""
var = evaluator.eval_ast(ast["expr"], state)
ret = var.val.copy()
var -= 1
return ret
def pre_incr_assign_expr(evaluator, ast, state):
"""Evaluates expression "++expr"."""
var = evaluator.eval_ast(ast["expr"], state)
var += 1
return var.val
def pre_decr_assign_expr(evaluator, ast, state):
"""Evaluates expression "--expr"."""
var = evaluator.eval_ast(ast["expr"], state)
var -= 1
return var.val
def assign_expr(evaluator, ast, state):
"""Evaluates expression "var = expr"."""
return assign_eval_funcs[ast["op"]](evaluator, ast, state)
def func_call_expr(evaluator, ast, state):
"""Evaluates expression "func(args)"."""
func_name = ast["funcName"]
func_obj = state.get(func_name)
args = list(map(lambda arg: evaluator.eval_ast(arg, state), ast["args"]))
res = func_obj(arg_asts=args, state=state)
return res
###
def unary_expr(evaluator, ast, state):
"""Evaluates a unary expression."""
res = unary_eval_funcs[ast["op"]](evaluator, ast, state)
return res
def binary_expr(evaluator, ast, state):
"""Evaluates a binary expression."""
res = binary_eval_funcs[ast["op"]](evaluator, ast, state)
return res
def ternary_expr(evaluator, ast, state):
"""Evaluates a ternary expression."""
if evaluator.eval_ast(ast["left"], state): # evaluator.eval_ast(ast["cond"], state):
res = evaluator.eval_ast(ast["middle"], state) # evaluator.eval_ast(ast["thenExpr"], state)
return res
else:
res = evaluator.eval_ast(ast["right"], state) # evaluator.eval_ast(ast["elseExpr"], state)
return res
# def deadlock_expr(_evaluator, _ast, _state):
# """Evaluates a deadlock expression."""
# return
###
def for_all_expr(evaluator, ast, state):
"""Evaluates expression "forall (name:type) expr"."""
var_name = ast["varName"]
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
state.new_local_scope()
state.define(var_name, clazz)
for val in clazz:
state.assign(var_name, val)
bool_res = evaluator.eval_ast(ast["expr"], state)
if not bool_res:
state.remove_local_scope()
return UppaalBool(False)
state.remove_local_scope()
return UppaalBool(True)
def exists_expr(evaluator, ast, state):
"""Evaluates expression "exists (name:type) expr"."""
var_name = ast["varName"]
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
state.new_local_scope()
state.define(var_name, clazz)
for val in clazz:
state.assign(var_name, val)
bool_res = evaluator.eval_ast(ast["expr"], state)
if bool_res:
state.remove_local_scope()
return UppaalBool(True)
state.remove_local_scope()
return UppaalBool(False)
def sum_expr(evaluator, ast, state):
"""Evaluates expression "sum (name:type) expr"."""
var_name = ast["varName"]
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
state.new_local_scope()
state.define(var_name, clazz)
res = UppaalInt(0)
for val in clazz:
state.assign(var_name, val)
int_res = evaluator.eval_ast(ast["expr"], state)
res += int_res
state.remove_local_scope()
return res
###
def invariant(_evaluator, _ast, _state):
"""Evaluates an invariant "ti - tj <= c"."""
return # TODO: Implementation
def select(evaluator, ast, state):
"""Evaluates a select statement "name : type"."""
var_name = ast["name"]
prefixes, clazz = evaluator.eval_ast(ast["type"], state)
var = UppaalVariable(name=var_name, val=clazz())
state.add(var_name, var)
def guard(_evaluator, _ast, _state):
"""Evaluates a guard "ti - tj <= c"."""
return # TODO: Implementation
def sync(_evaluator, _ast, _state):
"""Evaluates a synchronization "chan(!|?)"."""
return # TODO: Implementation
def update(evaluator, ast, state):
"""Evaluates an update "var = expr" or "func()"."""
evaluator.eval_ast(ast["expr"], state)
################################
# Function Lookup Dictionaries #
################################
unary_eval_funcs = {
"Plus": plus,
"Minus": minus,
"LogNot": log_not,
}
binary_eval_funcs = {
"Dot": dot,
"ArrayAccess": array_access,
"Add": add,
"Sub": sub,
"Mult": mult,
"Div": div,
"Mod": mod,
"LShift": l_shift,
"RShift": r_shift,
"LogAnd": log_and,
"LogOr": log_or,
"LogImply": log_imply,
"BitAnd": bit_and,
"BitOr": bit_or,
"BitXor": bit_xor,
"Minimum": minimum,
"Maximum": maximum,
"GreaterEqual": greater_equal,
"GreaterThan": greater_than,
"LessEqual": less_equal,
"LessThan": less_than,
"Equal": equal,
"NotEqual": not_equal,
}
assign_eval_funcs = {
"Assign": assign,
"AddAssign": add_assign,
"SubAssign": sub_assign,
"MultAssign": mult_assign,
"DivAssign": div_assign,
"ModAssign": mod_assign,
"LShiftAssign": l_shift_assign,
"RShiftAssign": r_shift_assign,
"BitAndAssign": bit_and_assign,
"BitOrAssign": bit_or_assign,
"BitXorAssign": bit_xor_assign,
}
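# Dispatch sketch (the AST fragment below is an illustrative assumption): an
# assignment such as "x += 1" arrives as a node whose "op" is "AddAssign", so
# assign_expr() above resolves it with
#     assign_eval_funcs[ast["op"]](evaluator, ast, state)
# The unary_eval_funcs and binary_eval_funcs tables are used the same way by
# unary_expr() and binary_expr().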
eval_funcs = {
"UppaalDeclaration": uppaal_declaration,
"UppaalSystemDeclaration": uppaal_system_declaration,
# "LineComment": line_comment,
# "BlockComment": block_comment,
"VariableDecls": variable_decls,
"VariableID": variable_id,
"InitialiserArray": initialiser_array,
"TypeDecls": type_decls,
"Type": type_,
"BoundedIntType": bounded_int_type,
"ScalarType": scalar_type,
"StructType": struct_type,
"CustomType": custom_type,
"FieldDecl": field_decl,
"FunctionDef": function_def,
"StatementBlock": statement_block,
"EmptyStatement": empty_statement,
"ExprStatement": expr_statement,
"ForLoop": for_loop,
"Iteration": iteration,
"WhileLoop": while_loop,
"DoWhileLoop": do_while_loop,
"IfStatement": if_statement,
"ReturnStatement": return_statement,
"Parameter": parameter,
"System": system,
"Process": process,
"Instantiation": instantiation,
# "ProgressDecl": progress_decl,
# "GanttDecl": gantt_decl,
#
<reponame>jbalint/spark
"""
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id: handler.py,v 1.5 2002/02/14 08:09:36 loewis Exp $
"""
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
"""Basic interface for SAX error handlers.
If you create an object that implements this interface, then
register the object with your XMLReader, the parser will call the
methods in your object to report all warnings and errors. There
are three levels of errors available: warnings, (possibly)
recoverable errors, and unrecoverable errors. All methods take a
SAXParseException as the only parameter."""
def error(self, exception):
"Handle a recoverable error."
raise exception
def fatalError(self, exception):
"Handle a non-recoverable error."
raise exception
def warning(self, exception):
"Handle a warning."
print exception
# ===== CONTENTHANDLER =====
class ContentHandler:
"""Interface for receiving logical document content events.
This is the main callback interface in SAX, and the one most
important to applications. The order of events in this interface
mirrors the order of the information in the document."""
def __init__(self):
self._locator = None
def setDocumentLocator(self, locator):
"""Called by the parser to give the application a locator for
locating the origin of document events.
SAX parsers are strongly encouraged (though not absolutely
required) to supply a locator: if it does so, it must supply
the locator to the application by invoking this method before
invoking any of the other methods in the DocumentHandler
interface.
The locator allows the application to determine the end
position of any document-related event, even if the parser is
not reporting an error. Typically, the application will use
this information for reporting its own errors (such as
character content that does not match an application's
business rules). The information returned by the locator is
probably not sufficient for use with a search engine.
Note that the locator will return correct information only
during the invocation of the events in this interface. The
application should not attempt to use it at any other time."""
self._locator = locator
def startDocument(self):
"""Receive notification of the beginning of a document.
The SAX parser will invoke this method only once, before any
other methods in this interface or in DTDHandler (except for
setDocumentLocator)."""
def endDocument(self):
"""Receive notification of the end of a document.
The SAX parser will invoke this method only once, and it will
be the last method invoked during the parse. The parser shall
not invoke this method until it has either abandoned parsing
(because of an unrecoverable error) or reached the end of
input."""
def startPrefixMapping(self, prefix, uri):
"""Begin the scope of a prefix-URI Namespace mapping.
The information from this event is not necessary for normal
Namespace processing: the SAX XML reader will automatically
replace prefixes for element and attribute names when the
http://xml.org/sax/features/namespaces feature is true (the
default).
There are cases, however, when applications need to use
prefixes in character data or in attribute values, where they
cannot safely be expanded automatically; the
start/endPrefixMapping event supplies the information to the
application to expand prefixes in those contexts itself, if
necessary.
Note that start/endPrefixMapping events are not guaranteed to
be properly nested relative to each-other: all
startPrefixMapping events will occur before the corresponding
startElement event, and all endPrefixMapping events will occur
after the corresponding endElement event, but their order is
not guaranteed."""
def endPrefixMapping(self, prefix):
"""End the scope of a prefix-URI mapping.
See startPrefixMapping for details. This event will always
occur after the corresponding endElement event, but the order
of endPrefixMapping events is not otherwise guaranteed."""
def startElement(self, name, attrs):
"""Signals the start of an element in non-namespace mode.
The name parameter contains the raw XML 1.0 name of the
element type as a string and the attrs parameter holds an
instance of the Attributes class containing the attributes of
the element."""
def endElement(self, name):
"""Signals the end of an element in non-namespace mode.
The name parameter contains the name of the element type, just
as with the startElement event."""
def startElementNS(self, name, qname, attrs):
"""Signals the start of an element in namespace mode.
The name parameter contains the name of the element type as a
(uri, localname) tuple, the qname parameter the raw XML 1.0
name used in the source document, and the attrs parameter
holds an instance of the Attributes class containing the
attributes of the element.
The uri part of the name tuple is None for elements which have
no namespace."""
def endElementNS(self, name, qname):
"""Signals the end of an element in namespace mode.
The name parameter contains the name of the element type, just
as with the startElementNS event."""
def characters(self, content):
"""Receive notification of character data.
The Parser will call this method to report each chunk of
character data. SAX parsers may return all contiguous
character data in a single chunk, or they may split it into
several chunks; however, all of the characters in any single
event must come from the same external entity so that the
Locator provides useful information."""
def ignorableWhitespace(self, whitespace):
"""Receive notification of ignorable whitespace in element content.
Validating Parsers must use this method to report each chunk
of ignorable whitespace (see the W3C XML 1.0 recommendation,
section 2.10): non-validating parsers may also use this method
if they are capable of parsing and using content models.
SAX parsers may return all contiguous whitespace in a single
chunk, or they may split it into several chunks; however, all
of the characters in any single event must come from the same
external entity, so that the Locator provides useful
information.
The application must not attempt to read from the array
outside of the specified range."""
def processingInstruction(self, target, data):
"""Receive notification of a processing instruction.
The Parser will invoke this method once for each processing
instruction found: note that processing instructions may occur
before or after the main document element.
A SAX parser should never report an XML declaration (XML 1.0,
section 2.8) or a text declaration (XML 1.0, section 4.3.1)
using this method."""
def skippedEntity(self, name):
"""Receive notification of a skipped entity.
The Parser will invoke this method once for each entity
skipped. Non-validating processors may skip entities if they
have not seen the declarations (because, for example, the
entity was declared in an external DTD subset). All processors
may skip external entities, depending on the values of the
http://xml.org/sax/features/external-general-entities and the
http://xml.org/sax/features/external-parameter-entities
properties."""
# ===== DTDHandler =====
class DTDHandler:
"""Handle DTD events.
This interface specifies only those DTD events required for basic
parsing (unparsed entities and attributes)."""
def notationDecl(self, name, publicId, systemId):
"Handle a notation declaration event."
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
"Handle an unparsed entity declaration event."
# ===== ENTITYRESOLVER =====
class EntityResolver:
"""Basic interface for resolving entities. If you create an object
implementing this interface, then register the object with your
Parser, the parser will call the method in your object to
resolve all external entities. Note that DefaultHandler implements
this interface with the default behaviour."""
def resolveEntity(self, publicId, systemId):
"""Resolve the system identifier of an entity and return either
the system identifier to read from as a string, or an InputSource
to read from."""
return systemId
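# ===== USAGE EXAMPLE =====
# A minimal sketch (not part of the SAX interfaces above) of how an
# application might subclass ContentHandler; the element-counting behaviour
# and the class name are illustrative assumptions only:
#
#     class ElementCounter(ContentHandler):
#         def __init__(self):
#             ContentHandler.__init__(self)
#             self.counts = {}
#         def startElement(self, name, attrs):
#             self.counts[name] = self.counts.get(name, 0) + 1
#
# A parser would be configured with XMLReader.setContentHandler() and would
# then invoke startDocument, startElement, characters, endElement and
# endDocument in document order.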
#============================================================================
#
# CORE FEATURES
#
#============================================================================
feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
# (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write
feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
# declarations.
# false: Do not report attributes used for Namespace declarations, and
# optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write
feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
# local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing)
0, 0, 0, 0],
[1529, 8.704895, 0, 9999, -9999, 1.0, 100, 1, 84.378012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1530, 2.692923, 0, 9999, -9999, 1.0, 100, 1, 79.055155, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1531, 104.653862, 0, 9999, -9999, 1.0, 100, 1, 183.821409, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1532, 2.125498, 0, 9999, -9999, 1.0, 100, 1, 37.379033, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1534, 1.420342, 0, 9999, -9999, 1.0, 100, 1, 29.516607, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1535, 0.603212, 0, 9999, -9999, 1.0, 100, 1, 8.931779, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1536, 2.335948, 0, 9999, -9999, 1.0, 100, 1, 39.26145, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1537, 4.551805, 0, 9999, -9999, 1.0, 100, 1, 99.740166, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1538, 51.465034, 0, 9999, -9999, 1.0, 100, 1, 130.774402, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1539, 102.043652, 0, 9999, -9999, 1.0, 100, 1, 201.766963, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1540, 1.754708, 0, 9999, -9999, 1.0, 100, 1, 4.160189, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1541, 1.271256, 0, 9999, -9999, 1.0, 100, 1, 3.429917, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1542, 22.605282, 0, 9999, -9999, 1.0, 100, 1, 50.287947, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1543, 0.565086, 0, 9999, -9999, 1.0, 100, 1, 14.788669, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1544, 23.693358, 0, 9999, -9999, 1.0, 100, 1, 121.437126, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1545, 81.994741, 0, 9999, -9999, 1.0, 100, 1, 185.545128, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1546, 82.218967, 0, 9999, -9999, 1.0, 100, 1, 255.44343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1547, 67.614723, 0, 9999, -9999, 1.0, 100, 1, 362.597919, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1548, 0.721249, 0, 9999, -9999, 1.0, 100, 1, 21.273779, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1549, 3.298956, 0, 9999, -9999, 1.0, 100, 1, 77.017486, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1550, 0.163234, 0, 9999, -9999, 1.0, 100, 1, 5.214715, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1551, 0.304981, 0, 9999, -9999, 1.0, 100, 1, 9.576491, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1552, 19.070388, 0, 9999, -9999, 1.0, 100, 1, 54.035471, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1553, 40.759532, 0, 9999, -9999, 1.0, 100, 1, 92.480282, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1554, 44.242819, 0, 9999, -9999, 1.0, 100, 1, 155.333413, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1555, 60.12844, 0, 9999, -9999, 1.0, 100, 1, 103.865774, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1556, 12.322818, 0, 9999, -9999, 1.0, 100, 1, 40.376346, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1557, 8.46835, 0, 9999, -9999, 1.0, 100, 1, 25.990242, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1558, 2.509741, 0, 9999, -9999, 1.0, 100, 1, 24.622373, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1559, 36.528878, 0, 9999, -9999, 1.0, 100, 1, 112.609207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1560, 22.366278, 0, 9999, -9999, 1.0, 100, 1, 86.395942, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1561, 6.93291, 0, 9999, -9999, 1.0, 100, 1, 19.127379, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1562, 4.09189, 0, 9999, -9999, 1.0, 100, 1, 61.888351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1563, 35.327707, 0, 9999, -9999, 1.0, 100, 1, 106.233907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1564, 30.749499, 0, 9999, -9999, 1.0, 100, 1, 58.27282, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1565, 5.51138, 0, 9999, -9999, 1.0, 100, 1, 12.83938, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1566, 119.367748, 0, 9999, -9999, 1.0, 100, 1, 358.676351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1567, 1.980328, 0, 9999, -9999, 1.0, 100, 1, 29.531771, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1568, 7.382762, 0, 9999, -9999, 1.0, 100, 1, 89.300597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1569, 142.565953, 0, 9999, -9999, 1.0, 100, 1, 328.718571, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1570, 120.200317, 0, 9999, -9999, 1.0, 100, 1, 243.241909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1571, 56.898408, 0, 9999, -9999, 1.0, 100, 1, 203.443403, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1572, 115.686694, 0, 9999, -9999, 1.0, 100, 1, 232.127956, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1573, 37.277333, 0, 9999, -9999, 1.0, 100, 1, 80.403772, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1574, 51.959213, 0, 9999, -9999, 1.0, 100, 1, 144.715972, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1575, 60.442501, 0, 9999, -9999, 1.0, 100, 1, 153.606376, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1576, 14.073028, 0, 9999, -9999, 1.0, 100, 1, 34.262017, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1577, 17.686192, 0, 9999, -9999, 1.0, 100, 1, 217.054488, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1578, 6.361027, 0, 9999, -9999, 1.0, 100, 1, 16.348222, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1579, 1.796998, 0, 9999, -9999, 1.0, 100, 1, 35.164333, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1580, 0.969771, 0, 9999, -9999, 1.0, 100, 1, 21.892492, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1581, 76.770758, 0, 9999, -9999, 1.0, 100, 1, 156.277964, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1582, 4.627554, 0, 9999, -9999, 1.0, 100, 1, 8.151092, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1583, 1.013582, 0, 9999, -9999, 1.0, 100, 1, 1.791968, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1584, 32.201187, 0, 9999, -9999, 1.0, 100, 1, 81.24993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1585, 1.813983, 0, 9999, -9999, 1.0, 100, 1, 3.685182, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1586, 32.269635, 0, 9999, -9999, 1.0, 100, 1, 61.31549, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1587, 98.037204, 0, 9999, -9999, 1.0, 100, 1, 191.635296, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1588, 23.404473, 0, 9999, -9999, 1.0, 100, 1, 59.424343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1589, 9.649174, 0, 9999, -9999, 1.0, 100, 1, 48.538268, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1590, 34.170018, 0, 9999, -9999, 1.0, 100, 1, 119.077525, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1591, 31.925348, 0, 9999, -9999, 1.0, 100, 1, 142.8447, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1592, 5.561961, 0, 9999, -9999, 1.0, 100, 1, 9.842361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1593, 4.064612, 0, 9999, -9999, 1.0, 100, 1, 7.183183, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1594, 5.400375, 0, 9999, -9999, 1.0, 100, 1, 9.56089, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1595, 28.62726, 0, 9999, -9999, 1.0, 100, 1, 54.79001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1596, 57.027743, 0, 9999, -9999, 1.0, 100, 1, 138.730049, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1597, 1.60468, 0, 9999, -9999, 1.0, 100, 1, 2.858987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1598, 2.718822, 0, 9999, -9999, 1.0, 100, 1, 4.795494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1599, 29.569389, 0, 9999, -9999, 1.0, 100, 1, 86.703571, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1600, 14.712936, 0, 9999, -9999, 1.0, 100, 1, 25.356501, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1601, 4.138944, 0, 9999, -9999, 1.0, 100, 1, 7.643653, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1602, 22.706117, 0, 9999, -9999, 1.0, 100, 1, 45.658169, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1603, 15.382223, 0, 9999, -9999, 1.0, 100, 1, 26.209248, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1604, 9.688412, 0, 9999, -9999, 1.0, 100, 1, 16.363032, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1605, 25.823012, 0, 9999, -9999, 1.0, 100, 1, 43.477178, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1606, 22.584798, 0, 9999, -9999, 1.0, 100, 1, 42.024907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1607, 11.485301, 0, 9999, -9999, 1.0, 100, 1, 19.395236, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1608, 10.729219, 0, 9999, -9999, 1.0, 100, 1, 19.491249, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1609, 3.210834, 0, 9999, -9999, 1.0, 100, 1, 6.052272, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1610, 10.193242, 0, 9999, -9999, 1.0, 100, 1, 18.571656, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1611, 3.490222, 0, 9999, -9999, 1.0, 100, 1, 6.420554, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1612, 5.75046, 0, 9999, -9999, 1.0, 100, 1, 10.811203, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1613, 15.275647, 0, 9999, -9999, 1.0, 100, 1, 27.976217, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1614, 15.393881, 0, 9999, -9999, 1.0, 100, 1, 28.183827, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1615, 93.892032, 0, 9999, -9999, 1.0, 100, 1, 193.234776, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1616, 3.890387, 0, 9999, -9999, 1.0, 100, 1, 6.865586, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1617, 6.026296, 0, 9999, -9999, 1.0, 100, 1, 10.63107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1618, 2.790265, 0, 9999, -9999, 1.0, 100, 1, 4.920368, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1619, 3.792149, 0, 9999, -9999, 1.0, 100, 1, 6.689637, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1620, 1.084317, 0, 9999, -9999, 1.0, 100, 1, 1.912024, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1621, 4.274053, 0, 9999, -9999, 1.0, 100, 1, 8.056388, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1622, 3.020597, 0, 9999, -9999, 1.0, 100, 1, 5.693597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1623, 11.470276, 0, 9999, -9999, 1.0, 100, 1, 20.717111, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1624, 4.922505, 0, 9999, -9999, 1.0, 100, 1, 8.938454, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1625, 33.876307, 0, 9999, -9999, 1.0, 100, 1, 65.182465, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1626, 6.446651, 0, 9999, -9999, 1.0, 100, 1, 11.878862, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1627, 5.757286, 0, 9999, -9999, 1.0, 100, 1, 10.196496, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1628, 31.895799, 0, 9999, -9999, 1.0, 100, 1, 66.613993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1629, 67.295304, 0, 9999, -9999, 1.0, 100, 1, 121.671047, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1630, 6.71729, 0, 9999, -9999, 1.0, 100, 1, 12.452584, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1631, 17.355646, 0, 9999, -9999, 1.0, 100, 1, 32.486249, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1632, 15.125213, 0, 9999, -9999, 1.0, 100, 1, 25.874893, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1633, 32.799309, 0, 9999, -9999, 1.0, 100, 1, 67.433329, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1634, 5.115801, 0, 9999, -9999, 1.0, 100, 1, 9.643044, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1635, 11.160851, 0, 9999, -9999, 1.0, 100, 1, 19.166135, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1636, 13.536604, 0, 9999, -9999, 1.0, 100, 1, 25.181406, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1637, 16.298847, 0, 9999, -9999, 1.0, 100, 1, 29.114828, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1638, 6.806951, 0, 9999, -9999, 1.0, 100, 1, 12.162188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1639, 16.506856, 0, 9999, -9999, 1.0, 100, 1, 29.183593, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1640, 1.264443, 0, 9999, -9999, 1.0, 100, 1, 2.237652, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1641, 2.838477, 0, 9999, -9999, 1.0, 100, 1, 5.023705, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1642, 6.627996, 0, 9999, -9999, 1.0, 100, 1, 11.730623, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1643, 1.931834, 0, 9999, -9999, 1.0, 100, 1, 3.417684, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1644, 6.735558, 0, 9999, -9999, 1.0, 100, 1, 11.76596, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1645, 6.302909, 0, 9999, -9999, 1.0, 100, 1, 11.144882, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1646, 2.11504, 0, 9999, -9999, 1.0, 100, 1, 3.73271, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1647, 9.92692, 0, 9999, -9999, 1.0, 100, 1, 17.434827, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1648, 28.233695, 0, 9999, -9999, 1.0, 100, 1, 109.345623, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1649, 5.151655, 0, 9999, -9999, 1.0, 100, 1, 23.481556, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1650, 11.474903, 0, 9999, -9999, 1.0, 100, 1, 176.928964, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1651, 6.559486, 0, 9999, -9999, 1.0, 100, 1, 161.276649, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1652, 15.754088, 0, 9999, -9999, 1.0, 100, 1, 84.070562, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1653, 0.859041, 0, 9999, -9999, 1.0, 100, 1, 18.431241, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1654, 2.867929, 0, 9999, -9999, 1.0, 100, 1, 47.53021, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1655, 6.126591, 0, 9999, -9999, 1.0, 100, 1, 10.79071, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1656, 1.503676, 0, 9999, -9999, 1.0, 100, 1, 2.680105, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1657, 3.159334, 0, 9999, -9999, 1.0, 100, 1, 5.6313, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
"""Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
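# --- Illustrative sketch (not part of the original module) -------------------
# A minimal concrete Region showing the two methods every subclass must supply
# (uniformPointInner and containsPoint). The axis-aligned box used here is
# only an example; RectangularRegion below is the real, rotation-aware class.
class _ExampleBoxRegion(Region):
    def __init__(self, xmin, ymin, xmax, ymax):
        super().__init__('ExampleBox')
        self.xmin, self.ymin, self.xmax, self.ymax = xmin, ymin, xmax, ymax
    def uniformPointInner(self):
        x = random.uniform(self.xmin, self.xmax)
        y = random.uniform(self.ymin, self.ymax)
        return self.orient(Vector(x, y))
    def containsPoint(self, point):
        point = toVector(point)
        return (self.xmin <= point.x <= self.xmax
                and self.ymin <= point.y <= self.ymax)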
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException(f'sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
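# Sample the radius with density proportional to r (triangular distribution
# peaked at self.radius) so that points are uniform over the disc's area.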
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
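# --- Illustrative sketch (not part of the original module) -------------------
# Sampling from a PolylineRegion: segments are chosen with probability
# proportional to their length (see uniformPointInner above). Coordinates are
# arbitrary example values.
#     route = PolylineRegion(points=[(0, 0), (10, 0), (10, 5)])
#     pt = route.uniformPoint()    # an OrientedVector aligned with its segment
#     bbox = route.getAABB()       # ((0.0, 0.0), (10.0, 5.0))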
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
xmin, ymin, xmax, ymax = self.polygons.bounds  # Shapely bounds are (minx, miny, maxx, maxy)
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
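# --- Illustrative sketch (not part of the original module) -------------------
# Intersecting a polygonal region with a circular one; coordinates and radii
# are arbitrary example values.
#     square = PolygonalRegion(points=[(0, 0), (4, 0), (4, 4), (0, 4)])
#     disc = CircularRegion(Vector(2, 2), 1.5)
#     overlap = square.intersect(disc)    # another PolygonalRegion
#     pt = overlap.uniformPointInner()    # a point inside both regions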
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` | |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rather than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', | |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import copy
import re
from collections import OrderedDict
from ..errors import ArgumentError, CubesError
from ..metadata import Dimension, Cube
from ..logging import get_logger
from .. import compat
__all__ = [
"Cell",
"Cut",
"PointCut",
"RangeCut",
"SetCut",
"cuts_from_string",
"string_from_cuts",
"string_from_path",
"string_from_hierarchy",
"path_from_string",
"cut_from_string",
"cut_from_dict",
]
NULL_PATH_VALUE = '__null__'
class Cell(object):
"""Part of a cube determined by slicing dimensions. Immutable object."""
def __init__(self, cube=None, cuts=None):
if not isinstance(cube, Cube):
raise ArgumentError("Cell cube should be sublcass of Cube, "
"provided: %s" % type(cube).__name__)
self.cube = cube
self.cuts = cuts if cuts is not None else []
def __and__(self, other):
"""Returns a new cell that is a conjunction of the two provided
cells. The cube has to match."""
if self.cube != other.cube:
raise ArgumentError("Can not combine two cells from different "
"cubes '%s' and '%s'."
% (self.cube.name, other.cube.name))
cuts = self.cuts + other.cuts
return Cell(self.cube, cuts=cuts)
def to_dict(self):
"""Returns a dictionary representation of the cell"""
result = {
"cube": str(self.cube.name),
"cuts": [cut.to_dict() for cut in self.cuts]
}
return result
@property
def all_attributes(self):
"""Returns an unordered set of key attributes used in the cell's
cuts."""
attributes = set()
for cut in self.cuts:
depth = cut.level_depth()
if depth:
dim = self.cube.dimension(cut.dimension)
hier = dim.hierarchy(cut.hierarchy)
keys = [dim.attribute(level.key.name) for level in hier[0:depth]]
attributes |= set(keys)
return list(attributes)
# Backward compatibility
# TODO: issue warning
@property
def key_attributes(self):
return self.all_attributes
def slice(self, cut):
"""Returns new cell by slicing receiving cell with `cut`. Cut with
same dimension as `cut` will be replaced, if there is no cut with the
same dimension, then the `cut` will be appended.
"""
# Fix for wrong early design decision:
if isinstance(cut, Dimension) or isinstance(cut, compat.string_type):
raise CubesError("slice() should now be called with a cut (since v0.9.2). To get "
"original behaviour of one-dimension point cut, "
"use cell.slice(PointCut(dim,path))")
cuts = self.cuts[:]
index = self._find_dimension_cut(cut.dimension)
if index is not None:
cuts[index] = cut
else:
cuts.append(cut)
return Cell(cube=self.cube, cuts=cuts)
def _find_dimension_cut(self, dimension):
"""Returns index of first occurence of cut for `dimension`. Returns
``None`` if no cut with `dimension` is found."""
names = [str(cut.dimension) for cut in self.cuts]
try:
index = names.index(str(dimension))
return index
except ValueError:
return None
def point_slice(self, dimension, path):
"""
Create another cell by slicing receiving cell through `dimension`
at `path`. The receiving object is not modified. If a cut with the dimension
already exists, it is replaced with the new one. If path is an empty list or
is None, then the cut for the given dimension is removed.
Example::
full_cube = Cell(cube)
contracts_2010 = full_cube.point_slice("date", [2010])
Returns: new derived cell object.
.. warning::
Deprecated. Use :meth:`cell.slice` instead with argument
`PointCut(dimension, path)`
"""
dimension = self.cube.dimension(dimension)
cuts = self.dimension_cuts(dimension, exclude=True)
if path:
cut = PointCut(dimension, path)
cuts.append(cut)
return Cell(cube=self.cube, cuts=cuts)
def drilldown(self, dimension, value, hierarchy=None):
"""Create another cell by drilling down `dimension` next level on
current level's key `value`.
Example::
cell = cubes.Cell(cube)
cell = cell.drilldown("date", 2010)
cell = cell.drilldown("date", 1)
is equivalent to:
cut = cubes.PointCut("date", [2010, 1])
cell = cubes.Cell(cube, [cut])
Reverse operation is ``cubes.rollup("date")``
Works only if the cut for dimension is `PointCut`. Otherwise the
behaviour is undefined.
If `hierarchy` is not specified (by default) then default dimension
hierarchy is used.
Returns new derived cell object.
"""
dimension = self.cube.dimension(dimension)
dim_cut = self.cut_for_dimension(dimension)
old_path = dim_cut.path if dim_cut else []
new_cut = PointCut(dimension, old_path + [value], hierarchy=hierarchy)
cuts = [cut for cut in self.cuts if cut is not dim_cut]
cuts.append(new_cut)
return Cell(cube=self.cube, cuts=cuts)
def multi_slice(self, cuts):
"""Create another cell by slicing through multiple slices. `cuts` is a
list of `Cut` object instances. See also :meth:`Cell.slice`."""
if isinstance(cuts, dict):
raise CubesError("dict type is not supported any more, use list of Cut instances")
cell = self
for cut in cuts:
cell = cell.slice(cut)
return cell
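# Illustrative use (assumes `cube` is an existing Cube with "date" and
# "region" dimensions; PointCut is defined later in this module):
#     cell = Cell(cube)
#     cell = cell.multi_slice([PointCut("date", [2010]),
#                              PointCut("region", ["EU"])])
# This is equivalent to calling cell.slice(...) once per cut.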
def cut_for_dimension(self, dimension):
"""Return first found cut for given `dimension`"""
dimension = self.cube.dimension(dimension)
cut_dimension = None
for cut in self.cuts:
cut_dimension = self.cube.dimension(cut.dimension)
if cut_dimension == dimension:
return cut
return None
def point_cut_for_dimension(self, dimension):
"""Return first point cut for given `dimension`"""
dimension = self.cube.dimension(dimension)
cutdim = None
for cut in self.cuts:
cutdim = self.cube.dimension(cut.dimension)
if isinstance(cut, PointCut) and cutdim == dimension:
return cut
return None
def rollup_dim(self, dimension, level=None, hierarchy=None):
"""Rolls-up cell - goes one or more levels up through dimension
hierarchy. If there is no level to go up (we are at the top level),
then the cut is removed.
If no `hierarchy` is specified, then the default dimension's hierarchy
is used.
Returns new cell object.
"""
# FIXME: make this the default roll-up
# Reason:
# * simpler to use
# * can be used more nicely in Jinja templates
dimension = self.cube.dimension(dimension)
dim_cut = self.point_cut_for_dimension(dimension)
if not dim_cut:
return copy.copy(self)
# raise ValueError("No cut to roll-up for dimension '%s'" % dimension.name)
cuts = [cut for cut in self.cuts if cut is not dim_cut]
hier = dimension.hierarchy(hierarchy)
rollup_path = hier.rollup(dim_cut.path, level)
# If the rollup path is empty, we are at the top level therefore we
# are removing the cut for the dimension.
if rollup_path:
new_cut = PointCut(dimension, rollup_path, hierarchy=hierarchy)
cuts.append(new_cut)
return Cell(cube=self.cube, cuts=cuts)
def rollup(self, rollup):
"""Rolls-up cell - goes one or more levels up through dimension
hierarchy. It works in similar way as drill down in
:meth:`AggregationBrowser.aggregate` but in the opposite direction (it
is like ``cd ..`` in a UNIX shell).
Roll-up can be:
* a string - single dimension to be rolled up one level
* an array - list of dimension names to be rolled-up one level
* a dictionary where keys are dimension names and values are
levels to be rolled up-to
.. note::
Only default hierarchy is currently supported.
"""
# FIXME: rename this to something like really_complex_rollup :-)
# Reason:
# * see reasons above for rollup_dim()
# * used only by Slicer server
cuts = OrderedDict()
for cut in self.cuts:
dim = self.cube.dimension(cut.dimension)
cuts[dim.name] = cut
new_cuts = []
# If it is a string, handle it as list of single string
if isinstance(rollup, compat.string_type):
rollup = [rollup]
if isinstance(rollup, (list, tuple)):
for dim_name in rollup:
cut = cuts.get(dim_name)
if cut is None:
continue
# raise ValueError("No cut to roll-up for dimension '%s'" % dim_name)
if not isinstance(cut, PointCut):
raise NotImplementedError("Only PointCuts are currently supported for "
"roll-up (rollup dimension: %s)" % dim_name)
dim = self.cube.dimension(cut.dimension)
hier = dim.default_hierarchy
rollup_path = hier.rollup(cut.path)
cut = PointCut(cut.dimension, rollup_path)
new_cuts.append(cut)
elif isinstance(rollup, dict):
for (dim_name, level_name) in rollup.items():
cut = cuts[dim_name]
if not cut:
raise ArgumentError("No cut to roll-up for dimension '%s'" % dim_name)
if type(cut) != PointCut:
raise NotImplementedError("Only PointCuts are currently supported for "
"roll-up (rollup dimension: %s)" % dim_name)
dim = self.cube.dimension(cut.dimension)
hier = dim.default_hierarchy
rollup_path = hier.rollup(cut.path, level_name)
cut = PointCut(cut.dimension, rollup_path)
new_cuts.append(cut)
else:
raise ArgumentError("Rollup is of unknown type: %s" %
type(rollup))
cell = Cell(cube=self.cube, cuts=new_cuts)
return cell
def level_depths(self):
"""Returns a dictionary of dimension names as keys and level depths
(index of deepest level)."""
levels = {}
for cut in self.cuts:
level = cut.level_depth()
dim = self.cube.dimension(cut.dimension)
dim_name = str(dim)
levels[dim_name] = max(level, levels.get(dim_name, 0))
return levels
def deepest_levels(self, include_empty=False):
"""Returns a list of tuples: (`dimension`, `hierarchy`, `level`) where
`level` is the deepest level specified in the respective cut. If no
level is specified (empty path) and `include_empty` is `True`, then the
level will be `None`. If `include_empty` is `False`, then cuts with an
empty path are not included in the result.
This method is currently used for preparing the periods-to-date
conditions.
See also: :meth:`cubes.Drilldown.deepest_levels`
"""
levels = []
for cut in self.cuts:
depth = cut.level_depth()
dim = self.cube.dimension(cut.dimension)
hier = dim.hierarchy(cut.hierarchy)
if depth:
item = (dim, hier, hier[depth-1])
elif include_empty:
item = (dim, hier, None)
levels.append(item)
return levels
def is_base(self, dimension, hierarchy=None):
"""Returns ``True`` when cell is base cell for `dimension`. Cell
is base if there is a point cut with path referring to the
most detailed level of the dimension `hierarchy`."""
hierarchy = dimension.hierarchy(hierarchy)
cut = self.point_cut_for_dimension(dimension)
if | |
#
# Python stuff to improve portability
#
from __future__ import print_function
#
# Generic python packages used
#
import datetime
import fcntl
import getpass
import os
import select
import SocketServer
import socket
import subprocess
import sys
import threading
import time
from stat import *
#
# XDD packages
#
from xdd.constants import XDD_SINK_TO_SOURCE_DELAY
from xdd.core import XDDError, TransferPreconditionError
from xdd.factory import EndpointFactory
class TransferFailError(XDDError):
def __init__(self, source, dest, reason):
self.source = source
self.dest = dest
self.reason = reason
#
# Interface classes to interact with local and remote XDD flows
#
class TransferManager:
"""
Manager for creating, monitoring, and deallocating local and remote flows.
"""
def __init__(self):
"""Constructor"""
self.isCreated = False
self.isStarted = False
self.isComplete = False
self.isSuccess = False
self.factory = None
# Temporary variables used for showing progress
self.beginTime = None
self.currentSourceFile = None
self.currentSinkFile = None
self.currentFlowSize = 0
self.currentRestartOffset = 0
self.verboseLog = None
self.requestSize = 0
self.transferSize = None
self.restartFlag = False
self.sinkDIOFlag = False
self.sinkSerialFlag = False
self.sinkTimestampFlag = False
self.sinkVerboseFlag = False
self.sinkTarget = None
self.sinkXddPath = ''
self.sinks = []
self.sourceDIOFlag = False
self.sourceSerialFlag = False
self.sourceVerboseFlag = False
self.sourceTimestampFlag = False
self.sourceTarget = None
self.sourceXddPath = ''
self.sources = []
def setRequestSize(self, reqSize):
"""Set the chunk size of data to move across the wire"""
self.requestSize = reqSize
def setTransferSize(self, transferSize):
"""Set the size of data to move across the wire"""
self.transferSize = transferSize
def setRestartFlag(self, restartFlag):
"""Set the amount of data previously sent across the wire"""
self.restartFlag = restartFlag
def setSinkTarget(self, target, dioFlag=False, serialFlag=False):
""" Set the name of of the sink target file"""
assert not self.isCreated
self.sinkTarget = target
self.sinkDIOFlag = dioFlag
self.sinkSerialFlag = serialFlag
def setSourceTarget(self, target, dioFlag=False, serialFlag=False):
"""Set the name of the source target file"""
assert not self.isCreated
self.sourceTarget = target
self.sourceDIOFlag = dioFlag
self.sourceSerialFlag = serialFlag
def setVerbosity(self, verboseLevel, filename):
"""Set the level of verbosity"""
if 1 <= verboseLevel:
self.sinkVerboseFlag = True
self.sourceVerboseFlag = True
if 2 <= verboseLevel:
self.sinkTimestampFlag = True
self.sourceTimestampFlag = True
self.verboseLog = filename
def addSink(self, user, hostIP, hostname, threads, ifs = [], port = 40010):
"""Add a sink to the list of sinks"""
assert not self.isCreated
assert hostIP
assert hostname
assert 0 < threads
sink = {'ip': hostIP, 'hostname': hostname, 'threads': threads, 'port': port, 'ifs': []}
if 0 == len(ifs):
sink['ifs'].append(hostIP)
else:
sink['ifs'].extend(ifs)
self.sinks.append(sink)
def addSource(self, user, hostIP, hostname, threads, ifs = [], port = 40010):
"""Add a source to the list of sources"""
assert not self.isCreated
assert hostIP
assert hostname
assert 0 < threads
source = {'ip': hostIP, 'hostname': hostname, 'threads': threads, 'port': port, 'ifs': []}
if 0 == len(ifs):
source['ifs'].append(hostIP)
else:
source['ifs'].extend(ifs)
self.sources.append(source)
def setSinkXddPath(self, path):
"""Set the path sinks use to find xdd"""
self.sinkXddPath = path
def setSourceXddPath(self, path):
"""Set the path sources use to find xdd"""
self.sourceXddPath = path
def setXddPath(self, path):
"""Set the path used to find xdd"""
self.sinkXddPath = path
self.sourceXddPath = path
def performPostCreateChecks(self):
"""@return 0 if all checks pass, otherwise non-zero"""
rc = 0
# Check that all of the XDD's are the same version
base = None
current = None
for f in self.factory.getEndpoints():
current = f.protocolVersion()
if base is None:
base = current
if current is None or not current or current != base:
print("ERROR: XDD Protocols do not match", file=sys.stderr)
rc = 1
break
# Print a generic warning if the sinks do not have pre-allocation
# support
for s in self.factory.getSinkEndpoints():
if not s.hasPreallocateAvailable():
print("WARNING: XDD preallocation support not available",
file=sys.stderr)
return rc
def createEndpoints(self):
"""Create local and remote transfer host endpoints"""
assert not self.isCreated
assert 0 < self.requestSize
assert 0 < len(self.sources)
assert 0 < len(self.sinks)
assert self.sinkTarget
assert self.sourceTarget
self.factory = EndpointFactory(self.requestSize,
self.sourceDIOFlag,
self.sourceSerialFlag,
self.sourceVerboseFlag,
self.sourceTimestampFlag,
self.sourceXddPath,
self.sources,
self.sinkDIOFlag,
self.sinkSerialFlag,
self.sinkVerboseFlag,
self.sinkTimestampFlag,
self.sinkXddPath,
self.sinks)
rc = self.factory.createEndpoints()
if 0 == rc:
self.isCreated = True
rc = self.performPostCreateChecks()
return rc
def createDir(self, sourceDir, targetDir):
"""Create the directory targetDir on the sink"""
assert self.isCreated
self.beginTime = time.time()
self.currentFlowSize = 0
sink = self.factory.getSinkEndpoints()[0]
rc = sink.createDirectory(targetDir)
if 0 == rc:
self.showProgress(sourceDir, 0)
print()
return rc
def createSymlink(self, sourceLink, targetLink, targetTarget):
"""Create the directory targetDir on the sink"""
assert self.isCreated
self.beginTime = time.time()
self.currentFlowSize = 0
sink = self.factory.getSinkEndpoints()[0]
rc = sink.createSymlink(targetTarget, targetLink)
if 0 == rc:
self.showProgress(sourceLink, 0)
print()
return rc
def startTransfer(self, sourceFile, sinkFile):
"""Start all flows in correct order"""
assert self.isCreated
rc = 0
self.currentSourceFile = sourceFile
self.currentSinkFile = sinkFile
self.currentRestartOffset = 0
# If needed get a transfer size
if not self.transferSize:
s = self.factory.getSourceEndpoints()[0]
transferSize = s.getFileSize(sourceFile)
else:
transferSize = self.transferSize
# Set a restart offset
restartOffset = 0
if self.restartFlag:
s = self.factory.getSinkEndpoints()[0]
restartOffset = s.getRestartOffset(sinkFile)
self.currentRestartOffset = restartOffset
# Start sinks first
sinks = self.factory.getSinkEndpoints()
for s in sinks:
r = s.startFlow(sinkFile, transferSize,
self.restartFlag, restartOffset)
if 0 != r:
print('Sink flow failed during startup', s.errorString())
rc += 1
# Pause between sink and source startups
time.sleep(XDD_SINK_TO_SOURCE_DELAY)
# Then start sources
sources = self.factory.getSourceEndpoints()
for s in sources:
r = s.startFlow(sourceFile, transferSize,
self.restartFlag, restartOffset)
if 0 != r:
print('Source flow failed during startup', s.errorString())
rc += 1
# Update the log with the execute commands
self.writeLog(isWriteCommand=True)
# If all went well, set state and start timer
if 0 == rc:
self.isStarted = True
self.beginTime = time.time()
self.currentFlowSize = transferSize
return rc
def monitorTransfer(self, monitorInterval):
"""
Monitor the current flows. Print a progress report to the screen
and update the logfile if necessary.
"""
assert self.isStarted
endpoints = self.factory.getEndpoints()
completedBytes = 0
completeCount = 0
failureCount = 0
while completeCount < len(endpoints) and 0 == failureCount:
monBegin = time.time()
completedBytes = 0
completeCount = 0
for e in endpoints:
# Check for completions and failures
if e.completionStatus() is not None:
completeCount += 1
if 0 != e.completionStatus():
failureCount += 1
# Get the progress
currentBytes = e.currentByte()
if 0 < currentBytes:
completedBytes += currentBytes
# The flows don't report exactly correct byte counts yet, so adjust the
# completed total here. This is due to a couple of things: sinks report the
# restart offset, and multi-source transfers use -startoffset rather than
# -restart offset.
correctedBytes = completedBytes - self.currentRestartOffset
if len(self.sources) > 1:
# Progress temporarily reports as len(sources) * restartOffset
# But then shrinks as the actual heartbeats get reported
initialVal = len(self.sources) * self.currentRestartOffset
if correctedBytes == initialVal:
correctedBytes = self.currentRestartOffset
else:
correctedBytes += self.currentRestartOffset
# Print a status message
self.showProgress(self.currentSourceFile, correctedBytes)
self.writeLog()
monEnd = time.time()
# Sleep until its time to monitor again
interval = monEnd - monBegin
if interval < monitorInterval:
time.sleep(monitorInterval - interval)
# Get all of the remaining log output
self.writeLog(flushAll=True)
# Determine the outcome of transfer
failures = 0
for e in endpoints:
if e.completionStatus() is None:
e.cancelFlow()
elif 0 != e.completionStatus():
failures += 1
print('RC:', e.completionStatus())
print('ERROR:', e.errorString())
# If the completion status for all endpoints is success, print out
# a final progress message and remove the restart cookie
if 0 == failures:
self.currentRestartOffset = 0
self.showProgress(self.currentSourceFile, self.currentFlowSize)
print('')
if self.restartFlag:
for s in self.factory.getSinkEndpoints():
# Ignore the return code on purpose
rc = s.removeRestartCookie(self.currentSinkFile)
return failures
def startAndMonitorTransfers(self, monitorInterval):
"""
Used to transfer and monitor a directory or multiple files
as part of a single transfer
"""
# First determine if the sink path exists
sink = self.factory.getSinkEndpoints()[0]
sinkIsDir = sink.pathIsDir(self.sinkTarget)
sinkExists = sink.pathExists(self.sinkTarget)
# This is tricky: if the sink path exists from a previous transfer
# attempt and this is a retry, we need to pretend it does not exist
if self.restartFlag and sinkExists and \
sink.transferIsComplete(self.sinkTarget, self.sinkTarget):
sinkExists = False
# Use one of the sources to build the transfer pairs
# Note you have to do this on the source in case the requested
# path is a directory or symlink
source = self.factory.getSourceEndpoints()[0]
(rc, dirs, files, links) = source.buildWalk(self.sourceTarget,
self.sinkTarget,
targetExists=sinkExists,
targetIsDir=sinkIsDir)
if 0 != rc:
print('ERROR: Unable to access', self.sourceTarget)
#print(rc)
#print(dirs)
def testMonthlyByMinuteAndSecond(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6, 6),
datetime(1997, 9, 2, 9, 6, 18),
datetime(1997, 9, 2, 9, 18, 6)])
def testMonthlyByHourAndMinuteAndSecond(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6, 6),
datetime(1997, 9, 2, 18, 6, 18),
datetime(1997, 9, 2, 18, 18, 6)])
def testMonthlyBySetPos(self):
self.assertEqual(list(rrule(MONTHLY,
count=3,
bymonthday=(13, 17),
byhour=(6, 18),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 13, 18, 0),
datetime(1997, 9, 17, 6, 0),
datetime(1997, 10, 13, 18, 0)])
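# bysetpos filters the occurrences expanded within each monthly period (by
# bymonthday and byhour above) and keeps only the 3rd and 3rd-from-last
# candidates, which is how the expected datetimes are selected.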
def testWeekly(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 9, 9, 0),
datetime(1997, 9, 16, 9, 0)])
def testWeeklyInterval(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 16, 9, 0),
datetime(1997, 9, 30, 9, 0)])
def testWeeklyIntervalLarge(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
interval=20,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1998, 1, 20, 9, 0),
datetime(1998, 6, 9, 9, 0)])
def testWeeklyByMonth(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 13, 9, 0),
datetime(1998, 1, 20, 9, 0)])
def testWeeklyByMonthDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 3, 9, 0),
datetime(1997, 10, 1, 9, 0),
datetime(1997, 10, 3, 9, 0)])
def testWeeklyByMonthAndMonthDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 5, 9, 0),
datetime(1998, 1, 7, 9, 0),
datetime(1998, 3, 5, 9, 0)])
def testWeeklyByWeekDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 9, 9, 0)])
def testWeeklyByNWeekDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 9, 9, 0)])
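# With WEEKLY frequency the ordinal on a weekday (TU(+1), TH(-1)) has no
# effect, so the expected values match the plain byweekday test above.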
def testWeeklyByMonthAndWeekDay(self):
# This test is interesting, because it crosses the year
# boundary in a weekly period to find day '1' as a
# valid recurrence.
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 8, 9, 0)])
def testWeeklyByMonthAndNWeekDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 8, 9, 0)])
def testWeeklyByMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 2, 3, 9, 0),
datetime(1998, 3, 3, 9, 0)])
def testWeeklyByMonthAndMonthDayAndWeekDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bymonth=(1, 3),
bymonthday=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 3, 3, 9, 0),
datetime(2001, 3, 1, 9, 0)])
def testWeeklyByYearDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=4,
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
def testWeeklyByYearDayNeg(self):
self.assertEqual(list(rrule(WEEKLY,
count=4,
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 31, 9, 0),
datetime(1998, 1, 1, 9, 0),
datetime(1998, 4, 10, 9, 0),
datetime(1998, 7, 19, 9, 0)])
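# Negative byyearday values count back from the end of the year; in the
# non-leap years covered here they resolve to the same dates as the
# positive byyearday test above.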
def testWeeklyByMonthAndYearDay(self):
self.assertEqual(list(rrule(WEEKLY,
count=4,
bymonth=(1, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 1, 1, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testWeeklyByMonthAndYearDayNeg(self):
self.assertEqual(list(rrule(WEEKLY,
count=4,
bymonth=(1, 7),
byyearday=(-365, -266, -166, -1),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 7, 19, 9, 0),
datetime(1999, 1, 1, 9, 0),
datetime(1999, 7, 19, 9, 0)])
def testWeeklyByWeekNo(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekno=20,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 5, 11, 9, 0),
datetime(1998, 5, 12, 9, 0),
datetime(1998, 5, 13, 9, 0)])
def testWeeklyByWeekNoAndWeekDay(self):
# That's a nice one. The first days of week number one
# may fall in the previous year.
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 29, 9, 0),
datetime(1999, 1, 4, 9, 0),
datetime(2000, 1, 3, 9, 0)])
def testWeeklyByWeekNoAndWeekDayLarge(self):
# Another nice test. The last days of week number 52/53
# may be in the next year.
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekno=52,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1998, 12, 27, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testWeeklyByWeekNoAndWeekDayLast(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekno=-1,
byweekday=SU,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 12, 28, 9, 0),
datetime(1999, 1, 3, 9, 0),
datetime(2000, 1, 2, 9, 0)])
def testWeeklyByWeekNoAndWeekDay53(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekno=53,
byweekday=MO,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 12, 28, 9, 0),
datetime(2004, 12, 27, 9, 0),
datetime(2009, 12, 28, 9, 0)])
def testWeeklyByEaster(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byeaster=0,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 12, 9, 0),
datetime(1999, 4, 4, 9, 0),
datetime(2000, 4, 23, 9, 0)])
def testWeeklyByEasterPos(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byeaster=1,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 13, 9, 0),
datetime(1999, 4, 5, 9, 0),
datetime(2000, 4, 24, 9, 0)])
def testWeeklyByEasterNeg(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byeaster=-1,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 4, 11, 9, 0),
datetime(1999, 4, 3, 9, 0),
datetime(2000, 4, 22, 9, 0)])
def testWeeklyByHour(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byhour=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0),
datetime(1997, 9, 9, 6, 0),
datetime(1997, 9, 9, 18, 0)])
def testWeeklyByMinute(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6),
datetime(1997, 9, 2, 9, 18),
datetime(1997, 9, 9, 9, 6)])
def testWeeklyBySecond(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0, 6),
datetime(1997, 9, 2, 9, 0, 18),
datetime(1997, 9, 9, 9, 0, 6)])
def testWeeklyByHourAndMinute(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6),
datetime(1997, 9, 2, 18, 18),
datetime(1997, 9, 9, 6, 6)])
def testWeeklyByHourAndSecond(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byhour=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0, 6),
datetime(1997, 9, 2, 18, 0, 18),
datetime(1997, 9, 9, 6, 0, 6)])
def testWeeklyByMinuteAndSecond(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 6, 6),
datetime(1997, 9, 2, 9, 6, 18),
datetime(1997, 9, 2, 9, 18, 6)])
def testWeeklyByHourAndMinuteAndSecond(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byhour=(6, 18),
byminute=(6, 18),
bysecond=(6, 18),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 6, 6),
datetime(1997, 9, 2, 18, 6, 18),
datetime(1997, 9, 2, 18, 18, 6)])
def testWeeklyBySetPos(self):
self.assertEqual(list(rrule(WEEKLY,
count=3,
byweekday=(TU, TH),
byhour=(6, 18),
bysetpos=(3, -3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 18, 0),
datetime(1997, 9, 4, 6, 0),
datetime(1997, 9, 9, 18, 0)])
def testDaily(self):
self.assertEqual(list(rrule(DAILY,
count=3,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 3, 9, 0),
datetime(1997, 9, 4, 9, 0)])
def testDailyInterval(self):
self.assertEqual(list(rrule(DAILY,
count=3,
interval=2,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 6, 9, 0)])
def testDailyIntervalLarge(self):
self.assertEqual(list(rrule(DAILY,
count=3,
interval=92,
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 12, 3, 9, 0),
datetime(1998, 3, 5, 9, 0)])
def testDailyByMonth(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonth=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 2, 9, 0),
datetime(1998, 1, 3, 9, 0)])
def testDailyByMonthDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonthday=(1, 3),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 3, 9, 0),
datetime(1997, 10, 1, 9, 0),
datetime(1997, 10, 3, 9, 0)])
def testDailyByMonthAndMonthDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 5, 9, 0),
datetime(1998, 1, 7, 9, 0),
datetime(1998, 3, 5, 9, 0)])
def testDailyByWeekDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 9, 9, 0)])
def testDailyByNWeekDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1997, 9, 2, 9, 0),
datetime(1997, 9, 4, 9, 0),
datetime(1997, 9, 9, 9, 0)])
def testDailyByMonthAndWeekDay(self):
self.assertEqual(list(rrule(DAILY,
count=3,
bymonth=(1, 3),
byweekday=(TU, TH),
dtstart=datetime(1997, 9, 2, 9, 0))),
[datetime(1998, 1, 1, 9, 0),
datetime(1998, 1, 6, 9, 0),
datetime(1998, 1, 8, 9, 0)])
# Copyright (c) Lawrence Livermore National Security, LLC and other Conduit
# Project developers. See top-level LICENSE AND COPYRIGHT files for dates and
# other details. No copyright assignment is required to contribute to Conduit.
"""
file: t_python_conduit_node.py
description: Unit tests for conduit::Node python module interface.
"""
import sys
import unittest
from conduit import Node
import numpy as np
class Test_Conduit_Node(unittest.TestCase):
def test_simple(self):
a_val = np.uint32(10)
b_val = np.uint32(20)
c_val = np.float64(30.0)
n = Node()
n['a'] = a_val
n['b'] = b_val
n['c'] = c_val
self.assertTrue(n['a'] == a_val)
self.assertTrue(n['b'] == b_val)
self.assertTrue(n['c'] == c_val)
def test_nested(self):
val = np.uint32(10)
n = Node()
n['a']['b'] = val
print(n['a']['b'])
self.assertEqual(n['a']['b'],val)
def test_vector(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
self.assertEqual(n['a'][99], 99)
def test_fetch(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
na = n.fetch('a')
na_val = na.value()
self.assertEqual(na_val[99], 99)
def test_child(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
na = n.child(0)
na_val = na.value()
self.assertEqual(na_val[99], 99)
n['b'] = vec
self.assertEqual(n.number_of_children(),2)
def test_save_load(self):
# on windows, this breaks at 27 !?
alen = 26
vec = np.array(range(alen), np.uint32)
n = Node()
n['a'] = vec
print(n)
n.save("test_pyconduit_node_save_load.conduit_bin")
nl = Node()
nl.load("test_pyconduit_node_save_load.conduit_bin")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
n.save("test_pyconduit_node_json_save_load.json",protocol="json")
nl = Node()
nl.load("test_pyconduit_node_json_save_load.json", protocol="json")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
n.save("test_pyconduit_node_base64_json_save_load.conduit_base64_json", protocol="conduit_base64_json")
nl = Node()
nl.load("test_pyconduit_node_base64_json_save_load.conduit_base64_json", protocol="conduit_base64_json")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
n.save("test_pyconduit_node_json_save_load.yaml",protocol="yaml")
nl = Node()
nl.load("test_pyconduit_node_json_save_load.yaml", protocol="yaml")
print(nl)
self.assertEqual(nl['a'][alen-1], alen-1)
def test_parse(self):
n = Node()
n.parse('{"a": 42.0}',"json")
self.assertTrue(n['a'] == np.float64(42.0))
n.parse('a: 52.0',"yaml")
self.assertTrue(n['a'] == np.float64(52.0))
def test_parent(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
na = n.fetch('a')
self.assertFalse(na.is_root())
# todo: test parent()
def test_total_bytes(self):
vec = np.array(range(100), np.uint32)
n = Node()
n['a'] = vec
self.assertEqual(n.total_strided_bytes(),4 * 100)
self.assertEqual(n.total_bytes_compact(),4 * 100)
# TODO: check if n.is_compact() should pass as well?
# it doesn't currently
self.assertTrue(n.fetch('a').is_compact())
def test_paths(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
for v in ['a','b','c']:
self.assertTrue(n.has_path(v))
paths = n.child_names()
for v in ['a','b','c']:
self.assertTrue(v in paths)
def test_list(self):
n = Node()
n.append().set(1)
self.assertEqual(n.child(0).value(),1)
self.assertEqual(n[0],1)
n2 = Node()
n2_c = n2.append()
n2_c.set(2)
self.assertEqual(n2.child(0).value(),2)
n3 = Node()
n3.fetch("here").append().set("a")
n3.fetch("here").append().set("b")
self.assertTrue(n3.fetch("here").child(0).value(),"a")
self.assertTrue(n3.fetch("here").child(1).value(),"b")
n4 = Node()
n4["here"].append().set("a")
n5 = n4["here"]
n5.append().set("b")
self.assertTrue(n4["here"].child(0).value(),"a")
self.assertTrue(n4["here"].child(1).value(),"b")
self.assertTrue(n4["here"][0],"a")
self.assertTrue(n4["here"][1],"b")
def test_remove(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
self.assertEqual(n.number_of_children(),3)
n.remove(path='c')
self.assertEqual(n.number_of_children(),2)
paths = n.child_names()
for v in ['a','b']:
self.assertTrue(v in paths)
n.remove(index=0)
paths = n.child_names()
for v in ['b']:
self.assertTrue(v in paths)
def test_info(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
ni = n.info();
#print ni
self.assertEqual(ni["total_strided_bytes"],n.total_strided_bytes())
def test_set_all_types(self):
types = [ 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64']
for type in types:
data = np.array(range(10), dtype=type)
n = Node()
n.set(data)
for i in range(len(data)):
self.assertEqual(n.value()[i], data[i])
def test_set_external(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
for type in types:
ext_data = np.array(range(10), dtype=type)
n = Node()
n.set_external(ext_data)
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
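# set_external is expected to wrap the numpy buffer rather than copy it,
# so the mutations below made through either the array or the node should
# be visible from both sides.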
ext_data[5] = 11
n.value()[8] = 77
n.value()[2] = 8
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
def test_set_external_node(self):
n = Node()
n.set(np.array(range(10), np.int32))
n2 = Node()
# test set external with node
n2.set_external(n)
for i in range(10):
self.assertEqual(n.value()[i], n2.value()[i])
n.value()[2] = 8
n.value()[8] = 77
# set of n should reflect in n2 with set_external
self.assertEqual(8, n2.value()[2])
self.assertEqual(77, n2.value()[8])
def test_set_external_basic_slice(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
for type in types:
base_data = np.array(range(20), dtype=type)
ext_data = base_data[1:16]
n = Node()
n.set_external(ext_data)
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
ext_data[5] = 11
n.value()[6] = 77
n.value()[2] = 8
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
def test_set_external_basic_strides(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
for type in types:
base_data = np.array(range(20), dtype=type)
ext_data = base_data[1:16:2]
n = Node()
n.set_external(ext_data)
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
ext_data[5] = 11
n.value()[6] = 77
n.value()[2] = 8
for i in range(len(ext_data)):
self.assertEqual(n.value()[i], ext_data[i])
def test_diff(self):
n1 = Node()
n2 = Node()
info = Node()
n1['a'] = 1
self.assertTrue(n1.diff(n2,info))
print(info)
n2['a'] = 1
self.assertFalse(n1.diff(n2,info))
n2['b'] = 2.0
self.assertTrue(n1.diff(n2,info))
self.assertFalse(n1.diff_compatible(n2,info))
n1['b'] = 1.0
self.assertFalse(n1.diff(n2,info,10))
def test_list_of_ints(self):
# also covered by test_set_all_types
# but this was the reproducer for
# https://github.com/LLNL/conduit/issues/281
n = Node()
a = np.array(list((1,2,3)))
n['a'] = a
self.assertEqual(n['a'][0], 1)
self.assertEqual(n['a'][1], 2)
self.assertEqual(n['a'][2], 3)
def test_compact_to(self):
n = Node()
n['a'] = 1
n['b'] = 2
n['c'] = 3
ni = n.info()
self.assertEqual(ni["mem_spaces"].number_of_children(), 3)
n2 = Node()
n.compact_to(n2)
ni = n2.info()
print(ni)
self.assertEqual(ni["mem_spaces"].number_of_children(), 1)
def test_update(self):
n = Node()
data = np.array(range(10), dtype='float64')
n["data"].set_external(data)
print(n)
n2 = Node()
n2.update(n)
print(n2)
self.assertEqual(n2["data"][0],0)
n3 = Node()
n3.update_external(n)
data[0] = 10
print(n3)
self.assertEqual(n3["data"][0],10)
n4 = Node()
n4["data"] = 10
n4.update_compatible(n)
print(n4)
self.assertEqual(n4["data"],10)
def test_reset(self):
n = Node()
data = np.array(range(10), dtype='float64')
n["data"].set_external(data)
print(n)
n.reset()
self.assertEqual(n.number_of_children(), 0)
def test_child_rename(self):
a_val = np.uint32(10)
b_val = np.uint32(20)
n = Node()
with self.assertRaises(Exception):
n.rename_child('a','b')
n['a'] = a_val
n['b'] = b_val
with self.assertRaises(Exception):
n.rename_child('bad','good')
with self.assertRaises(Exception):
n.rename_child('b','a')
self.assertTrue(n['a'] == a_val)
self.assertTrue(n['b'] == b_val)
n.rename_child('b','c')
self.assertTrue(n['a'] == a_val)
self.assertTrue(n['c'] == b_val)
def test_string(self):
n = Node();
n.set("my string!")
print(n)
self.assertEqual(n.value(),"my string!")
# test numpy string
nps = np.string_("my numpy string!")
n.set(nps)
print(n)
print(repr(n))
self.assertEqual(n.value(),"my numpy string!")
aofstrs = np.array(["here","are","a","few","strings"])
print(aofstrs)
n.set(aofstrs)
print(n)
self.assertEqual(n[0],"here")
self.assertEqual(n[1],"are")
self.assertEqual(n[2],"a")
self.assertEqual(n[3],"few")
self.assertEqual(n[4],"strings")
def test_numeric_tuples(self):
n = Node()
n["tuple_0"].set((1, 2, 3, 4))
n["tuple_1"].set((1.0, 2.0, 3.0, 4.0))
n["tuple_2"].set((1, 2, 3, 4.0))
print(n)
self.assertEqual(n['tuple_0'][0], 1)
self.assertEqual(n['tuple_0'][1], 2)
self.assertEqual(n['tuple_0'][2], 3)
self.assertEqual(n['tuple_0'][3], 4)
self.assertEqual(n['tuple_1'][0], 1.0)
self.assertEqual(n['tuple_1'][1], 2.0)
self.assertEqual(n['tuple_1'][2], 3.0)
self.assertEqual(n['tuple_1'][3], 4.0)
self.assertEqual(n['tuple_2'][0], 1.0)
self.assertEqual(n['tuple_2'][1], 2.0)
self.assertEqual(n['tuple_2'][2], 3.0)
self.assertEqual(n['tuple_2'][3], 4.0)
def test_numeric_lists(self):
n = Node()
n["list_0"].set((1, 2, 3, 4))
n["list_1"].set((1.0, 2.0, 3.0, 4.0))
n["list_2"].set((1, 2, 3, 4.0))
print(n)
self.assertEqual(n['list_0'][0], 1)
self.assertEqual(n['list_0'][1], 2)
self.assertEqual(n['list_0'][2], 3)
self.assertEqual(n['list_0'][3], 4)
self.assertEqual(n['list_1'][0], 1.0)
self.assertEqual(n['list_1'][1], 2.0)
self.assertEqual(n['list_1'][2], 3.0)
self.assertEqual(n['list_1'][3], 4.0)
self.assertEqual(n['list_2'][0], 1.0)
self.assertEqual(n['list_2'][1], 2.0)
self.assertEqual(n['list_2'][2], 3.0)
self.assertEqual(n['list_2'][3], 4.0)
def test_general_tuples(self):
n = Node()
n.set((1, "here"))
print(n)
self.assertEqual(n[0], 1.0)
self.assertEqual(n[1], "here")
def test_general_lists(self):
n = Node()
n.set([1, "there"])
print(n)
self.assertEqual(n[0], 1.0)
self.assertEqual(n[1], "there")
def test_key_with_slash(self):
n = Node()
n["normal/path"] = 10
n.add_child("child_with_/_inside").set(42)
print(n)
self.assertTrue(n.has_path("normal/path"))
self.assertFalse(n.has_child("normal/path"))
self.assertFalse(n.has_path("child_with_/_inside"))
self.assertTrue(n.has_child("child_with_/_inside"))
self.assertEqual(2,n.number_of_children())
self.assertEqual(n["normal/path"],10);
self.assertEqual(n.child(name="child_with_/_inside").value(),42);
n["normal"].remove_child("path")
self.assertFalse(n.has_path("normal/path"))
def test_fetch_existing(self):
n = Node()
n["my/path"] = 10
n_sub = n.fetch_existing("my/path")
self.assertEqual(n_sub.value(),10);
with self.assertRaises(Exception):
n.fetch_existing('bad/path')
def test_to_string(self):
a_val = np.uint32(10)
b_val = np.uint32(20)
c_val = np.float64(30.0)
n = Node()
n['a'] = a_val
n['b'] = b_val
n['c'] = c_val
res_to_str_def = n.to_string()
res_to_str_yaml = n.to_string(protocol="yaml")
res_to_str_json = n.to_string(protocol="json")
res_to_yaml = n.to_yaml()
res_to_json = n.to_json()
self.assertEqual(res_to_str_def, res_to_yaml)
self.assertEqual(res_to_str_yaml, res_to_yaml)
self.assertEqual(res_to_str_json, res_to_json)
n.print_detailed()
def test_numpy_slice_as_set_input(self):
n = Node()
# slice with non trivial strides
numpy_array = np.array(range(21), dtype='float64')
v = numpy_array.reshape((3, 7))
print("Input Array")
print(v)
print("Desired Slice")
print(v[:,0])
n['v'] = v
n['vs'] = v[:,0]
n['vs_expected'] = np.array(v[:,0],np.float64)
print(n)
sdiff = np.setdiff1d(n['vs'], v[:,0])
print("Set Difference: ",sdiff )
self.assertEqual(len(sdiff), 0);
# a more complex slice
numpy_array = np.array(range(105), dtype='float64')
v = numpy_array.reshape((3, 7, 5))
print("Input Array")
print(v)
print("Desired Slice")
print(v[:,0,3:5])
n['v'] = v
n['vs'] = v[:,0,3:5]
n['vs_expected'] = np.array(v[:,0,3:5],np.float64)
print(n)
sdiff = np.setdiff1d(n['vs'], v[:,0,3:5])
print("Set Difference: ",sdiff )
self.assertEqual(len(sdiff), 0);
def test_numpy_slice_as_set_external_input(self):
n = Node()
# slice with non trivial strides
numpy_array = np.array(range(21), dtype='float64')
v = numpy_array.reshape((3, 7))
print("Input Array")
print(v)
print("Desired Slice")
print(v[:,0])
n['v'] = v
n['vs'].set_external(v[:,0])
n['vs_expected'] = np.array(v[:,0],np.float64)
print(n)
sdiff = np.setdiff1d(n['vs'], v[:,0])
print("Set Difference: ",sdiff )
self.assertEqual(len(sdiff), 0);
# a more complex slice, can't use set external here.
n = Node()
numpy_array = np.array(range(105), dtype='float64')
v = numpy_array.reshape((3, 7, 5))
with self.assertRaises(TypeError):
n['vs'].set_external(v[:,0,3:5])
# let's do a 1-D effective slice; this should work since
# it reduces to a 1-D strided case
n['vs'].set_external(v[:,0,0])
n['vs_expected'] = np.array(v[:,0,0],np.float64)
def test_describe(self):
n = Node()
n["a"] = [1,2,3,4,5];
n["b"] = [1,2,3];
n["c"] = [1,2,3,4,5,6];
n["d"] = [1,2,3,4,5,6,7];
n["e"] = [1,2,3,4,5,6,7,8,9,10,11,12];
n["f"] = [1.0,2.0,3.0,4.0,5.0,6.0,7.0];
n["g"] = [2.0,4.0];
d = n.describe()
print(d)
self.assertEqual(d["a/count"],5);
self.assertEqual(d["b/count"],3);
self.assertEqual(d["c/count"],6);
self.assertEqual(d["d/count"],7);
self.assertEqual(d["e/count"],12);
self.assertEqual(d["f/count"],7);
self.assertEqual(d["a/min"],1)
self.assertEqual(d["b/min"],1)
self.assertEqual(d["c/min"],1)
self.assertEqual(d["d/min"],1)
self.assertEqual(d["e/min"],1)
self.assertEqual(d["f/min"],1.0)
self.assertEqual(d["a/max"],5)
self.assertEqual(d["b/max"],3)
self.assertEqual(d["c/max"],6)
self.assertEqual(d["d/max"],7)
self.assertEqual(d["e/max"],12)
self.assertEqual(d["f/max"],7.0)
self.assertEqual(d["g/mean"],3.0);
opts = Node()
opts["threshold"] = 10
d = n.describe(opts)
print(d)
def test_summary_string(self):
n = Node()
n["a"] = [1,2,3,4,5];
n["b"] = [1,2,3];
n["c"] = [1,2,3,4,5,6];
n["d"] = [1,2,3,4,5,6,7];
n["e"] = [1,2,3,4,5,6,7,8,9,10,11,12];
n["f"] = [1.0,2.0,3.0,4.0,5.0,6.0,7.0];
n["g"] = [2.0,4.0];
print(repr(n))
r = n.to_summary_string()
import sys
import threading
import ctypes
from os import path,remove,unlink,mkdir
import time
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt,QEvent,QPoint,QRect,QLine
from PyQt5.QtWidgets import QAbstractSlider, QApplication, QMainWindow, QToolBar
from PyQt5.QtGui import QMouseEvent
from PyQt5.uic import loadUi
from Ch_color_picker import Ui_Dialog as ch_color_diag
from info_window import Ui_infoWindow
import edited_navigationtoolbar2qt_main
from measurement_ import Ui_MeasureWindow
from mplsmall_main import *
from mplsmall_FFT import *
import extended_main as exmain
from fft_widget import FFT_handler
from config import config
import csv
import numpy as np
from matplotlib.lines import Line2D
from matplotlib import cbook, cm, colors as m_colors
from matplotlib.ticker import AutoMinorLocator
from PyQt5.Qt import QMetaMethod
##imports for pyinstaller
import mplwidget
import mplsmall_main
import mplsmall_FFT
import fft_widget
import v_spinbox
import fft_refference_select_spin
import fft_ydiv_spin
import freq_div_spin
import t_spinbox
import window_param_spin
from datetime import datetime
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
loadUi("main.ui",self)
self.setWindowTitle("SCOPE ANALYSER")
self.fft_panel_frame.setVisible(False)
self.mplsmall_FFT.setVisible(False)
self.main_panel_frame.setVisible(True)
self.mplsmall.setVisible(True)
self.setWindowIcon(QtGui.QIcon('res\\icon.ico'))
self.debug_=True
self.original_stdout=sys.stdout
self.stdouttofile()
###################################################CONFIG#################################################################
self.conf=config(self)
####################################################BINDINGS##############################################################
self.axes_list=[]
self.plot_list=[]
self.line_list=[]
self.label_list=[]
self.zeroline_list=[]
self.zeroline_visibility_list=[]
user32 = ctypes.windll.user32
self.screensize = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
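# GetSystemMetrics(0)/GetSystemMetrics(1) return the primary screen width
# and height in pixels (SM_CXSCREEN/SM_CYSCREEN), so this is Windows-only,
# matching the ctypes.windll usage above.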
self.set_sizes()
self.main_ylim=[0,0]
self.plot_opacity=0
self.get_config(False)####not called by actionbutton
self.stdouttofile()
self.tabWidget.setStyleSheet("QTabWidget::pane {border-right: 0px;border-bottom: 0px;border-left:0px;border-top:0px;}")
self.figure = self.MplWidget.figure
self.plot_axes = self.figure.add_subplot(1, 1, 1)
self.canvas=self.MplWidget.canvas
self.plot_canvas=self.figure.canvas
self.plot_axes.patch.set_facecolor('xkcd:black')
self.plot_axes.spines['bottom'].set_color('white')
self.plot_axes.spines['top'].set_color('white')
self.plot_axes.spines['right'].set_color('white')
self.plot_axes.spines['left'].set_color('white')
self.plot_axes.tick_params(axis='x', colors='white')
#self.plot_axes.tick_params(axis='y', colors='white')
#self.plot_axes.legend(loc='upper right')
self.plot_axes.patch.set_facecolor('xkcd:black')
#self.plot_axes.set_xticklabels([])
self.plot_axes.set_yticklabels([])
self.plot_axes.grid(which='major', axis="both", alpha=0.9, linestyle='--',color='white')
self.plot_axes.grid(which='minor', axis="both", alpha=0.5, linestyle='-.',color='white')
self.plot_axes.tick_params(which='minor', length=4)
self.plot_axes.yaxis.set_minor_locator(AutoMinorLocator(5))
self.plot_axes.xaxis.set_minor_locator(AutoMinorLocator(5))
self.plot_axes.set_ylim(self.main_ylim)
self.plot_canvas.draw()
#self.plot_fig = self.MplWidget.figure
self.figure.tight_layout(pad=0, w_pad=None, h_pad=None)
self.plot_canvas.mpl_connect('draw_event',self.On_Canvas_drawn)
self.toolbar = edited_navigationtoolbar2qt_main.NavigationToolbar2QT(self.plot_canvas, self)
self.toolbar.setStyleSheet("""
QWidget {
background-color:qlineargradient(spread:pad, x1:0.969955, y1:0.159, x2:0, y2:1, stop:0.0945274 rgba(92, 167, 206, 255), stop:1 rgba(255, 255, 255, 255))\n
}
""")
self.exmain=exmain.exmain(self)
self.addToolBar(QtCore.Qt.BottomToolBarArea,self.FFT_Widget.toolbar)
self.FFT_Widget.toolbar.setVisible(False)
self.addToolBar(QtCore.Qt.BottomToolBarArea,self.toolbar)
self.actionOpen.triggered.connect(self.file_open)
self.actionChannel_Color.triggered.connect(self.open_MenuColor_dialog)
self.actionReload_config.triggered.connect(self.conf.load_config)
self.actionAbout.triggered.connect(self.exmain.show_about)
self.ch_name_col_list=[] #list to save channel name and color
self.ch_volt_div_list=[]##list to save volt divs
self.Current_file_dialog=["",""]
self.ch_color_edit=0
self.col_edited=False
self.file_edited=False
self.all_plot_count =0
self.x_limit=[]
self.all_x_maxs=[0]
self.all_x_mins=[0]
self.x=[]
self.y=[]
self.x_scale=[]#will be used in shift
self.y_scale=[]
self.line_header=[]
self.t_base_list=[]
self.v_base_list=[]
self.multiplier_index_list=[]
self.matched_pos=-1
self.default_color_list=["#ffff00","#00ffff","#ff00ff","#ff0000","#00ff00","#0000ff","#7d7d00","#007d7d","#7d007d","#7d7d00","#550000","#005500","#000055","#ffaa7f"]
self.actionChannel_Information.triggered.connect(self.Open_Information_window)
self.vdial_mouse_press=0
self.shown_flag=True
self.drawn=False
self.rescalex_Extended_flag=False
self.Window_minimised=False
self.CH_select_combo.setEnabled(False) ###########enabled after 1st entry
self.CH_enable_sw.setEnabled(False)
self.multiplier_combo.setEnabled(False)
self.replotting=0 #used in v/div adjust
self.vdial.sliderReleased.connect(lambda: self.vdial_on_release(3))
self.v_spinbx.val_entered.connect(lambda: self.vdial_on_release(2))
self.v_unit_combo.currentIndexChanged.connect(self.v_unit_combo_changed)
self.t_dial.sliderReleased.connect(lambda: self.t_dial_value_chg(0))
self.t_spinbx.val_entered.connect(self.t_spin_entered)
self.vdial.valueChanged.connect(self.vdial_val_changed)
self.t_dial.valueChanged.connect(self.tdial_changed)
self.bypass_canvas_draw_on_reScalePlot=False####while initialisation and setting v_base and t_base
self.v_panel_Enabled(False)
self.t_panel_Enabled(False)
self.panel_Extra_Enabled(False)
self.v_step_vals=[1,2,5,10,20,50,100,200,500,1,2,5,10,20,50,100] #1,2,5,10,20,50,100,200,500 mV, 1,2,5,10,20,50,100 V
self.t_step_vals=[2,4,8,20,40,80,200,400,800] #2,4,8,20,40,80,200,400,800 ns,2,4,8,20,40,80,200,400,800 us,2,4,8,20,40,80,200,400,800 ms ,2,4,8,20,40s
self.t_step_unit=["ns","us","ms","s"]
self.t_step_indx=0
self.t_unit_indx=0
self.v_step_indx=0
self.v_unit_indx=1
self.current_volt_per_div = 2
self.default_canvas_width=-1
self.default_canvas_height=-1
self.plotted_with_small_scaledsize=-1
self.plotted_in_range=False
self.initial_members_count=0
self.plotted_out_of_range=False
self.x_limit_rescaled_out_of_range=[0,0]
self.x_limit_rescaled_in_range_small=[0,0]
self.zoom_within_out_ranged=0
self.x_canvas_out_ranged=0
self.xtics_step_out_ranged=0
self.t_val=-1
self.popup_width_offset=int(self.screensize[0]*8.34*1e-3)
self.popup_height_offset=int(self.screensize[1]*(21/1080))
#self.popup_width_offset=17
#self.popup_height_offset=21
if self.conf.set_time_to_base_time!=None:
self.settings_set_to_time_base=self.conf.set_time_to_base_time
else:
self.settings_set_to_time_base=True
if self.conf.set_to_volt_base!=None:
self.settings_set_to_volt_base=self.conf.set_to_volt_base
else:
self.settings_set_to_volt_base=True
self.infoWin = Ui_infoWindow(self)
self.mWin=Ui_MeasureWindow(self)
self.fftW=FFT_handler(self)
self.fftW.initialise()
self.mplsmall.init_fig(self)#####################initialise small plot
self.mplsmall_FFT.init_fig(self)
'''if path.exists("set.sa"):
self.settings_file=open("set.sa",'r+')
self.settings_data=self.settings_file.readlines()
else:
if isinstance(self.win_pos, str)==False:
self.move(self.win_pos)'''
self.define_rubberband()
self.MplWidget.scrollArea.horizontalScrollBar().valueChanged.connect(self.draw_rubberbands)
self.cycle_g_key=0
self.tabWidget.currentChanged.connect(self.tabChanged)
self.main_panel_width=0
self.firstRun=True
self.pushButton.clicked.connect(self.exmain.test_func)
self.conf.get_position()
#########################################@EVENT HANDLING#################################################
def tabChanged(self):
if self.tabWidget.currentIndex()==1:
self.main_panel_frame.setVisible(False)
self.toolbar.setVisible(False)
self.mplsmall.setVisible(False)
self.fft_panel_frame.setVisible(True)
self.mplsmall_FFT.setVisible(True)
self.FFT_Widget.toolbar.setVisible(True)
else:
self.fft_panel_frame.setVisible(False)
self.FFT_Widget.toolbar.setVisible(False)
self.mplsmall_FFT.setVisible(False)
self.main_panel_frame.setVisible(True)
self.mplsmall.setVisible(True)
self.toolbar.setVisible(True)
def keyPressEvent(self, event):
if event.key()==Qt.Key_G:###g
self.grid_view_cycle()
elif event.key()==Qt.Key_Escape and self.exmain.picked_==True:
try:
self.exmain.picked_=False
self.exmain.p.remove()
self.refresh_plot()
except Exception:
print(sys.exc_info())
print("LINE NO:",format(sys.exc_info()[-1].tb_lineno))
elif event.key()==Qt.Key_Control:
self.fftW.ctrl_handle(True)
def keyReleaseEvent(self,e):
if e.key()==Qt.Key_Control:
self.fftW.ctrl_handle(False)
def resizeEvent(self,event):
QMainWindow.resizeEvent(self, event)
print("resize")
if self.shown_flag==True and hasattr(self, "resize_timer")==True:
if self.resize_timer.is_alive():
self.resize_timer.cancel()
self.resize_timer=threading.Timer(0.5,self.resize_thread)
else:
self.resize_timer=threading.Timer(0.5,self.resize_thread)
self.resize_timer.start()
else:
self.resize_timer=threading.Timer(0.5,self.resize_thread)
self.resize_timer.start()
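# The 0.5 s single-shot timer debounces bursts of resize events: each new
# event cancels any pending timer, so resize_thread only runs once the
# user has stopped resizing for half a second.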
def resize_thread(self):
if self.tabWidget.currentIndex()==0:
w = self.MplWidget.scrollArea.size().width()
h = self.MplWidget.scrollArea.size().height()
elif self.tabWidget.currentIndex()==1:
w = self.FFT_Widget.scrollArea.size().width()
h = self.FFT_Widget.scrollArea.size().height()
self.resize_self(w,h)
self.fftW.resize_self(self.shown_flag,w,h)
def resize_self(self,w,h):
if self.shown_flag==True and self.plotted_in_range==True and self.rescalex_Extended_flag!=True:
if w>self.width_during_scaling:
self.canvas.resize(w,(h-self.mplwidget_height_offset))
self.mplsmall.canvas.resize(w,self.mplsmall_window_height)
print("R1")
else:
#self.canvas.resize(self.canvas.size().width(),(h-self.mplwidget_height_offset))
self.mplsmall.canvas.resize(w,self.mplsmall_window_height)
print("R1E")
elif self.rescalex_Extended_flag==True:
self.rubberBand_reds_notDrawn=True
if w>self.width_during_scaling:
self.canvas.resize(w,(h-self.mplwidget_height_offset))
self.mplsmall.canvas.resize(w,self.mplsmall_window_height)
print("R2_1")
else:
self.canvas.resize(self.canvas.size().width(),(h-self.mplwidget_height_offset))
self.mplsmall.canvas.resize(w,self.mplsmall_window_height)
print("R2_2")
elif self.shown_flag==True:##INIT
self.canvas.resize(w,(h-self.mplwidget_height_offset))
self.fftW.canvas.resize(w,h-self.mplwidget_height_offset)
self.mplsmall.canvas.resize(w,self.mplsmall_window_height)
self.mplsmall_FFT.canvas.resize(w,self.mplsmall_window_height)#########fftw and the small of it resized on __init__
print("R3")
def showEvent(self, event):
QMainWindow.showEvent(self, event)
QApplication.processEvents()
print("Shown")
self.shown_flag=True
self.exmain.cursor_show_all()
if self.Window_minimised==False:
if self.tabWidget.currentIndex()==0:
w = self.MplWidget.scrollArea.size().width()
self.MplWidget.scrollArea.resize(w,(self.frame_3.size().height()-86))
self.FFT_Widget.scrollArea.resize(w,(self.frame_3.size().height()-86))
h = self.MplWidget.scrollArea.size().height()
elif self.tabWidget.currentIndex()==1:
w = self.FFT_Widget.scrollArea.size().width()
self.MplWidget.scrollArea.resize(w,(self.frame_3.size().height()-86))
self.FFT_Widget.scrollArea.resize(w,(self.frame_3.size().height()-86))
h = self.FFT_Widget.scrollArea.size().height()
self.default_canvas_width=w
self.default_canvas_height=h
self.canvas.resize(w,h)
self.fftW.canvas.resize(w,h)
#print("ScrollArea:",w," ",h)
self.mplsmall.canvas.resize(w,self.mplwidget_height_offset)
self.mplsmall_FFT.canvas.resize(w,self.mplwidget_height_offset)
elif self.Window_minimised==True:
self.Window_minimised=False
def changeEvent(self, event):
if event.type() == QEvent.WindowStateChange:
if self.windowState() & Qt.WindowMinimized:
self.Window_minimised=True
def closeEvent(self,e):
#self.save_win_pos()
self.conf.save_position()
self.log_file.close()
if self.infoWin.closed==False:
self.infoWin.close()
self.infoWin.deleteLater()
if self.mWin.closed==False:
self.mWin.close()
self.mWin.deleteLater()
if self.exmain.aboutwin.shown==True:
self.exmain.aboutwin.close()
self.exmain.aboutwin.deleteLater()
if self.fftW.zoomwin.shown==True:
self.fftW.zoomwin.close()
self.fftW.zoomwin.deleteLater()
self.deleteLater()
return super(Ui_MainWindow,self).closeEvent(e)
def On_Canvas_drawn(self,draw_event):
print("Draw_evt")
if self.canvas.size().width()<self.tabWidget.size().width():
self.canvas.resize(self.tabWidget.size().width(),self.tabWidget.size().height()-54)
self.mplsmall.canvas.resize(self.canvas.size().width(),self.mplwidget_height_offset)
for i in range(self.all_plot_count):
self.ch_name_col_list[i][1]=str(m_colors.to_hex(self.axes_list[i].get_lines()[0].get_color()))
#self.plot_axes.get_lines()[i].set_label(self.ch_name_col_list[i][0]) ##bug## when the label is changed from the navigation toolbar it initially changes, but the goal was to make it unchangeable
self.px2pt=self.plot_axes.transData.inverted()################define axes transformation on resize
self.pt2px=self.plot_axes.transData
self.mplsmall_pt2px=self.mplsmall.axes.transData
self.mplsmall_px2pt=self.mplsmall.axes.transData.inverted()
self.draw_rubberbands()
self.exmain.refresh_bounds()
if self.exmain.picked_==True: #if any plot was picked to move and the canvas is somehow redrawn without moving it, the pick point is removed.
try:
self.exmain.p.remove()
except Exception:
print(sys.exc_info())
print("LINE NO:",format(sys.exc_info()[-1].tb_lineno))
if self.drawn==False:
self.exmain.cursor_show_all()
self.exmain.cursor_refresh_on_redraw()
#self.setProgress(100,True)
if self.rescalex_Extended_flag:
half_page_step=int(self.MplWidget.scrollArea.horizontalScrollBar().pageStep()/2)
print("MID_TO_SET",self.set_scrl_val)#####midCHECK
self.MplWidget.scrollArea.horizontalScrollBar().setValue(self.set_scrl_val-half_page_step)
self.drawn=True
if self.firstRun==True:
ytick=self.plot_axes.get_yticks(minor=False)
self.ydiv=ytick[1]-ytick[0]
print("ydiv",self.ydiv)
self.firstRun=False
for i in range(len(self.zeroline_visibility_list)):
self.exmain.draw_zeroline(i,self.zeroline_visibility_list[i])
#self.setProgress(0,False)
def t_spin_entered(self):
if self.t_spinbx.value()!=self.t_val:
self.t_dial_value_chg(1)
elif self.t_unit_combo.currentIndex()!=self.t_unit_indx:
self.t_dial_value_chg(1)
def zline_toggled(self):
zeroline_enabled=self.zero_line_enabled_sw.isChecked()
CH_selected=self.CH_select_combo.currentText()
member_match=-1
member_matched=0
print(self.ch_volt_div_list)
for members in self.ch_volt_div_list:
member_match+=1
if members[0]==CH_selected:
print("Channel selected:",members[0])
member_matched=1
break
if member_matched==1:
self.exmain.draw_zeroline(member_match,zeroline_enabled)
self.zeroline_visibility_list[member_match]=zeroline_enabled
###############################################@FILE OPEN@###################################
def file_open(self,MainWindow):
if self.fftW.zoomwin.isVisible():
self.fftW.zoomwin.close()
self.Current_file_dialog = QtWidgets.QFileDialog.getOpenFileName(self,"Select CSV file",self.last_folder,"CSV files( *.csv)")
if self.Current_file_dialog[0] != "":
self.last_folder=path.dirname(self.Current_file_dialog[0])
self.dialog = QtWidgets.QDialog()
self.dialog.ui = ch_color_diag()
self.dialog.ui.setupUi(self.dialog)
if (self.dialog.ui.comboBox.count()>self.all_plot_count):
self.dialog.ui.comboBox.setCurrentIndex(self.all_plot_count)
else:
run_num = self.all_plot_count-self.dialog.ui.comboBox.count()+1
combo_count=self.dialog.ui.comboBox.count()+1
for i in range(run_num):
self.dialog.ui.comboBox.addItem("CH"+str(combo_count+i))
self.dialog.ui.comboBox.setCurrentIndex(self.all_plot_count)
if self.all_plot_count<len(self.default_color_list):
self.dialog.ui.color_btn.setStyleSheet("background-color:"+self.default_color_list[self.all_plot_count])
else:
self.dialog.ui.color_btn.setStyleSheet("background-color:#ffffff")
self.dialog.ui.ok_btn.clicked.connect(self.Color_dialog_ok)
self.dialog.ui.Cancel_btn.clicked.connect(self.Color_dialog_Cancel)
self.dialog.ui.color_btn.clicked.connect(self.color_pick)
self.dialog.ui.comboBox.currentIndexChanged.connect(self.color_combo_change)
self.dialog.exec_()
else:
print("No file to plot")
########################################@File Open COLOLR DIALOG@#####################################
def Color_dialog_ok(self):
self.Current_ch_name= str(self.dialog.ui.comboBox.currentText())
btn_color = self.dialog.ui.color_btn.palette().color(1).name(0)
print(self.Current_ch_name)
print(btn_color)
self.dialog.done(1)
self.disconnect_receivers(self.CH_enable_sw,self.CH_enable_sw.toggled)
self.plot_from_path(self.Current_file_dialog[0],self.Current_ch_name,btn_color) #########funtion to plot form file location, chName, ch Color
def Color_dialog_Cancel(self):
print("No changes Done")
self.dialog.done(0)
def color_pick(self):
self.ch_color= QtWidgets.QColorDialog.getColor()
self.Current_ch_color=self.ch_color.name(0)
if str(self.ch_color.name(0)) != "#000000":
print("background-color:"+self.Current_ch_color)
self.dialog.ui.color_btn.setStyleSheet("background-color:"+self.Current_ch_color)
else:
print("No color Selected")
def color_combo_change(self):
ch_name=str(self.dialog.ui.comboBox.currentText())
matched_pos=-1
match=0
for members in self.ch_name_col_list:
matched_pos += 1
if members[0]==ch_name: ##if current channel matches with already plotted channel @MEMBER_MATCH
ch_color=members[1]
match=1
self.dialog.ui.color_btn.setStyleSheet("background-color:"+ch_color)
break
if match==0:
if self.all_plot_count<len(self.default_color_list):
self.dialog.ui.color_btn.setStyleSheet("background-color:"+self.default_color_list[self.all_plot_count])
else:
self.dialog.ui.color_btn.setStyleSheet("background-color:#ffffff")
#######################################@MENU EDIT COLOLR DIALOG@#######################################
def open_MenuColor_dialog(self): #pops in when window channel color select button is triggered
self.menu_col_dialog = QtWidgets.QDialog()
self.menu_col_dialog.ui = ch_color_diag()
self.menu_col_dialog.ui.setupUi(self.menu_col_dialog)
self.menu_col_dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.menu_col_combo_change()
if (self.menu_col_dialog.ui.comboBox.count()<=self.all_plot_count):
run_num = self.all_plot_count-self.menu_col_dialog.ui.comboBox.count()
print("COMBOCOUNT:",self.menu_col_dialog.ui.comboBox.count())
print("ALL PLOT COUNT",self.all_plot_count)
print("RUN_NUM",run_num)
combo_count=self.menu_col_dialog.ui.comboBox.count()+1
for i in range(run_num):
self.menu_col_dialog.ui.comboBox.addItem("CH"+str(combo_count+i))
self.menu_col_dialog.ui.ok_btn.clicked.connect(self.MenuColor_dialog_ok)
self.menu_col_dialog.ui.Cancel_btn.clicked.connect(self.MenuColor_dialog_Cancel)
self.menu_col_dialog.ui.comboBox.currentIndexChanged.connect(self.menu_col_combo_change)
self.menu_col_dialog.ui.color_btn.clicked.connect(self.MenuColor_pick)
self.menu_col_dialog.exec_()
def MenuColor_dialog_ok(self):
self.Current_ch_name= str(self.menu_col_dialog.ui.comboBox.currentText())
btn_color = self.menu_col_dialog.ui.color_btn.palette().color(1).name(0)
print(self.Current_ch_name)
print(btn_color)
self.menu_col_dialog.done(1)
self.ch_color_edit=1
if self.Current_file_dialog[0] !="":
#self.setProgress(0,True)
self.plot_from_path(self.Current_file_dialog[0],self.Current_ch_name,btn_color)
def MenuColor_dialog_Cancel(self):
self.menu_col_dialog.done(1)
def MenuColor_pick(self):
self.ch_color= QtWidgets.QColorDialog.getColor()
self.Current_ch_color=self.ch_color.name(0)
if str(self.ch_color.name(0)) != "#000000":
print("background-color:"+self.Current_ch_color)
self.menu_col_dialog.ui.color_btn.setStyleSheet("background-color:"+self.Current_ch_color)
else:
print("No color Selected")
def menu_col_combo_change(self):
ch_name=str(self.menu_col_dialog.ui.comboBox.currentText())
matched_pos=-1
match=0
for members in self.ch_name_col_list:
matched_pos += 1
if members[0]==ch_name: ##if current channel matches with already plotted channel @MEMBER_MATCH
ch_color=members[1]
match=1
self.menu_col_dialog.ui.color_btn.setStyleSheet("background-color:"+ch_color)
break
if match==0:
if self.all_plot_count<len(self.default_color_list):
self.menu_col_dialog.ui.color_btn.setStyleSheet("background-color:"+self.default_color_list[self.all_plot_count])
else:
self.menu_col_dialog.ui.color_btn.setStyleSheet("background-color:#ffffff")
#########################################@INFORMATION WINDOW@##############################################
def Open_Information_window(self):
#MainWindow.hide()
row_num=0
needs_size_update=False
if self.initial_members_count!=len(self.ch_name_col_list) or self.col_edited==True or self.file_edited==True:
if self.infoWin.shown==True:
Each_Row_height,Actual_win_height_0_row,table_height_offset=self.info_win_read_dimentions()
elif self.infoWin.was_shown:
Each_Row_height=self.infoWin.Each_Row_height_saved
Actual_win_height_0_row=self.infoWin.Actual_win_height_0_row_saved
table_height_offset=self.infoWin.table_height_offset_saved
for i in range(self.infoWin.tableWidget.rowCount()):
self.infoWin.tableWidget.removeRow(0)
self.infoWin.first_run=True
needs_size_update=True
if self.infoWin.first_run==True:
for members in self.ch_name_col_list:
self.infoWin.tableWidget.insertRow(row_num)
for i in range(len(members)):
if i!=1:
self.infoWin.tableWidget.setItem(row_num,i,QtWidgets.QTableWidgetItem(members[i]))
else:
self.infoWin.tableWidget.setItem(row_num,i,QtWidgets.QTableWidgetItem(members[i]))
self.infoWin.tableWidget.item(row_num,i).setBackground(QtGui.QColor(members[i]))
self.infoWin.tableWidget.item(row_num,i).setTextAlignment(Qt.AlignCenter)
print(row_num," ",i," ",members[i])
row_num +=1
self.initial_members_count=row_num
self.infoWin.tableWidget.resizeColumnsToContents()
self.infoWin.tableWidget.resizeRowsToContents()
self.infoWin.first_run=False
if needs_size_update:
if self.infoWin.shown==True or self.infoWin.was_shown==True:
TH=int((self.infoWin.tableWidget.rowCount()*Each_Row_height)+Actual_win_height_0_row)
print("TH=",TH)
self.infoWin.resize_by_func=True
#self.infoWin.tableWidget_Infow_size_same=False
self.infoWin.tableWidget.resize(self.infoWin.tableWidget.size().width(),TH+table_height_offset)
self.infoWin.resize(self.infoWin.size().width(),TH)
print("InfoWin Size Updated")
self.infoWin.activateWindow()
self.infoWin.show()
def info_win_read_dimentions(self):
info_Win_Height=self.infoWin.size().height()
table_height=self.infoWin.tableWidget.height()
table_height_offset=table_height-info_Win_Height
T_row_Height=0
row_count=self.infoWin.tableWidget.rowCount()
for i in range(row_count):
T_row_Height+=self.infoWin.tableWidget.rowHeight(i)
Each_Row_height=T_row_Height/row_count
Actual_win_height_0_row=info_Win_Height-T_row_Height
print("AWH=",Actual_win_height_0_row," Prow_count=",row_count,"PT_row_Height=",T_row_Height,"info_Win_Height=",info_Win_Height,"table_height=",table_height)
return Each_Row_height,Actual_win_height_0_row,table_height_offset
############################################MEASUREMENT_WINDOW#############################################
def Open_measure_window(self):
measure_Thread=threading.Thread(target=self.mWin.Update_UI)
measure_Thread.start()
measure_Thread.join()
self.mWin.show()
#############################################@PLOTTING@####################################################
def plot_from_path(self,path,ch_name,ch_color):
####TODO drawing Diag
self.member_match=0 #flag to check whether the channel name/color list (self.ch_name_col_list) already contains the channel currently shown on the plot @MEMBER_MATCH
self.matched_pos=-1
for members in self.ch_name_col_list:
print(len(members))
self.matched_pos += 1
if members[0]==ch_name: ##if current channel matches with already plotted channel @MEMBER_MATCH
members[1]=ch_color
if self.ch_color_edit==1:
path=members[2] ###saved path will be used if channel is on edit @CHANNEL_EDIT
else:
members[2]=path ##else 'path' passed to this function will be used as its new path @CHANNEL_EDIT
print("Member_match",self.matched_pos)
print(path)
self.member_match=1
break
if self.member_match==0 and self.ch_color_edit==0: # if the member does not match and this is a fresh entry @@@FRESH ENTRY
self.ch_name_col_list.append([ch_name,ch_color,path]) ##########appended to
self.ch_volt_div_list.append([ch_name,2,2,1,0,0])###ch_name,volt/div,volt_unit_index(v_unit_indx),enabled_bool,SHIFT_bool,yshift
#############Disconnections###############
self.disconnect_receivers(self.CH_select_combo, self.CH_select_combo.currentIndexChanged)
self.disconnect_receivers(self.CH_move_combo, self.CH_move_combo.currentIndexChanged)
self.disconnect_receivers(self.fft_ch_select, self.fft_ch_select.currentIndexChanged)
self.disconnect_receivers(self.CH_cursor_combo,self.CH_cursor_combo.currentIndexChanged )
self.disconnect_receivers(self.multiplier_combo, self.multiplier_combo.currentIndexChanged)
self.disconnect_receivers(self.zero_line_enabled_sw, self.zero_line_enabled_sw.toggled)
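# The handlers are disconnected before the combo boxes are repopulated so
# that the addItem/setCurrentIndex calls below do not fire change callbacks
# mid-update; they are reconnected once the new entries are in place
# (inferred from the structure of this block).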
#############Changes###############
self.fft_ch_select.addItem(ch_name)
self.fft_ch_select.setCurrentIndex(self.fft_ch_select.count()-1)
self.CH_move_combo.addItem(ch_name)
self.CH_move_combo.setCurrentIndex(self.CH_move_combo.count()-1)
self.CH_cursor_combo.addItem(ch_name)
self.CH_cursor_combo.setCurrentIndex(self.CH_cursor_combo.count()-1)
self.CH_select_combo.addItem(ch_name)
self.CH_select_combo.setCurrentIndex(self.CH_select_combo.count()-1)
self.multiplier_combo.setCurrentIndex(0)
#############Connections###############
self.CH_move_combo_connection=self.CH_move_combo.currentIndexChanged.connect(self.exmain.CH_move_combo_change)
self.fft_ch_select_connection=self.fft_ch_select.currentIndexChanged.connect(self.fftW.fft_CH_changed)
self.CH_cursor_combo_connection=self.CH_cursor_combo.currentIndexChanged.connect(self.exmain.CH_cursor_combo_change)
self.multiplier_combo_connection=self.multiplier_combo.currentIndexChanged.connect(self.onMultiplierChanged)
self.all_plot_count += 1
print( self.ch_name_col_list)
if self.member_match==0 and self.ch_color_edit==1:
self.ch_color_edit=0
return
if self.all_plot_count==1:
if self.ch_color_edit==0:
if self.matched_pos==0 and self.member_match==1:
self.disconnect_receivers(self.CH_select_combo, self.CH_select_combo.currentIndexChanged)
self.disconnect_receivers(self.zero_line_enabled_sw, self.zero_line_enabled_sw.toggled)
self.axes_list[0].clear()
self.line_list=[]
self.plot_list=[]
self.axes_list=[]
self.x=[]
| |
icon to TOIF format"""
# TODO: move this to python-trezor at some point
DIM = 32
icon = icon.resize((DIM, DIM), Image.LANCZOS)
# remove alpha channel, replace with black
bg = Image.new("RGBA", icon.size, (0, 0, 0, 255))
icon = Image.alpha_composite(bg, icon)
# process pixels
pix = icon.load()
data = bytes()
for y in range(DIM):
for x in range(DIM):
r, g, b, _ = pix[x, y]
c = ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | ((b & 0xF8) >> 3)
data += struct.pack(">H", c)
z = zlib.compressobj(level=9, wbits=10)
zdata = z.compress(data) + z.flush()
zdata = zdata[2:-4] # strip header and checksum
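# wbits=10 selects a 1 KiB (2**10) compression window; stripping the 2-byte
# zlib header and 4-byte Adler-32 trailer leaves a raw deflate stream.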
return zdata
def coindef_from_dict(coin):
proto = CoinDef()
for fname, _, fflags in CoinDef.FIELDS.values():
val = coin.get(fname)
if val is None and fflags & protobuf.FLAG_REPEATED:
val = []
elif fname == "signed_message_header":
val = val.encode()
elif fname == "hash_genesis_block":
val = bytes.fromhex(val)
setattr(proto, fname, val)
return proto
def serialize_coindef(proto, icon):
proto.icon = icon
buf = io.BytesIO()
protobuf.dump_message(buf, proto)
return buf.getvalue()
def sign(data):
h = sha256(data).digest()
sign_key = ed25519.SigningKey(b"A" * 32)
return sign_key.sign(h)
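# Signs the SHA-256 digest of the payload; the all-"A" signing key looks
# like a fixed development placeholder rather than a production secret.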
# ====== click command handlers ======
@click.group()
@click.option(
"--colors/--no-colors",
"-c/-C",
default=sys.stdout.isatty(),
help="Force colored output on/off",
)
def cli(colors):
global USE_COLORS
USE_COLORS = colors
@cli.command()
# fmt: off
@click.option("--backend/--no-backend", "-b", default=False, help="Check blockbook/bitcore responses")
@click.option("--icons/--no-icons", default=True, help="Check icon files")
@click.option("-d", "--show-duplicates", type=click.Choice(("all", "nontoken", "errors")),
default="errors", help="How much information about duplicate shortcuts should be shown.")
# fmt: on
def check(backend, icons, show_duplicates):
"""Validate coin definitions.
Checks that every btc-like coin is properly filled out, reports duplicate symbols,
missing or invalid icons, backend responses, and uniform key information --
i.e., that all coins of the same type have the same fields in their JSON data.
Uniformity check ignores NEM mosaics and ERC20 tokens, where non-uniformity is
expected.
The `--show-duplicates` option can be set to:
- all: all shortcut collisions are shown, including colliding ERC20 tokens
- nontoken: only collisions that affect non-ERC20 coins are shown
- errors: only collisions between non-ERC20 tokens are shown. This is the default,
as a collision between two or more non-ERC20 tokens is an error.
In the output, duplicate ERC tokens will be shown in cyan; duplicate non-tokens
in red. An asterisk (*) next to symbol name means that even though it was detected
as duplicate, it is still included in results.
The collision detection checks that SLIP44 numbers don't collide between different
mainnets (testnet collisions are allowed), that `address_prefix` doesn't collide
with Bitcoin (other collisions are reported as warnings). `address_prefix_p2sh`
is also checked but we have a bunch of collisions there and can't do much
about them, so it's not an error.
In the collision checks, Bitcoin is shown in red, other mainnets in blue,
testnets in green and unsupported networks in gray, marked with `(X)` for
non-colored output.
"""
if backend and requests is None:
raise click.ClickException("You must install requests for backend check")
if icons and not CAN_BUILD_DEFS:
raise click.ClickException("Missing requirements for icon check")
defs, buckets = coin_info.coin_info_with_duplicates()
all_checks_passed = True
print("Checking BTC-like coins...")
if not check_btc(defs.bitcoin):
all_checks_passed = False
print("Checking Ethereum networks...")
if not check_eth(defs.eth):
all_checks_passed = False
if show_duplicates == "all":
dup_level = logging.DEBUG
elif show_duplicates == "nontoken":
dup_level = logging.INFO
else:
dup_level = logging.ERROR
print("Checking unexpected duplicates...")
if not check_dups(buckets, dup_level):
all_checks_passed = False
nontoken_dups = [coin for coin in defs.as_list() if "dup_key_nontoken" in coin]
if nontoken_dups:
nontoken_dup_str = ", ".join(
highlight_key(coin, "red") for coin in nontoken_dups
)
print_log(logging.ERROR, "Non-token duplicate keys: " + nontoken_dup_str)
all_checks_passed = False
if icons:
print("Checking icon files...")
if not check_icons(defs.bitcoin):
all_checks_passed = False
if backend:
print("Checking backend responses...")
if not check_backends(defs.bitcoin):
all_checks_passed = False
print("Checking segwit fields...")
if not check_segwit(defs.bitcoin):
all_checks_passed = False
print("Checking key uniformity...")
for cointype, coinlist in defs.items():
if cointype in ("erc20", "nem"):
continue
if not check_key_uniformity(coinlist):
all_checks_passed = False
if not all_checks_passed:
print("Some checks failed.")
sys.exit(1)
else:
print("Everything is OK.")
@cli.command()
# fmt: off
@click.option("-o", "--outfile", type=click.File(mode="w"), default="-")
@click.option("-s/-S", "--support/--no-support", default=True, help="Include support data for each coin")
@click.option("-p", "--pretty", is_flag=True, help="Generate nicely formatted JSON")
@click.option("-l", "--list", "flat_list", is_flag=True, help="Output a flat list of coins")
@click.option("-i", "--include", metavar="FIELD", multiple=True, help="Include only these fields")
@click.option("-e", "--exclude", metavar="FIELD", multiple=True, help="Exclude these fields")
@click.option("-I", "--include-type", metavar="TYPE", multiple=True, help="Include only these categories")
@click.option("-E", "--exclude-type", metavar="TYPE", multiple=True, help="Exclude these categories")
@click.option("-f", "--filter", metavar="FIELD=FILTER", multiple=True, help="Include only coins that match a filter")
@click.option("-F", "--filter-exclude", metavar="FIELD=FILTER", multiple=True, help="Exclude coins that match a filter")
@click.option("-t", "--exclude-tokens", is_flag=True, help="Exclude ERC20 tokens. Equivalent to '-E erc20'")
@click.option("-d", "--device", metavar="NAME", help="Only include coins supported on a given device")
# fmt: on
def dump(
outfile,
support,
pretty,
flat_list,
include,
exclude,
include_type,
exclude_type,
filter,
filter_exclude,
exclude_tokens,
device,
):
"""Dump coin data in JSON format
This file is structured the same as the internal data. That is, top-level object
is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'. Value for each
key is a list of dicts, each describing a known coin.
If '--list' is specified, the top-level object is instead a flat list of coins.
\b
Fields are category-specific, except for four common ones:
- 'name' - human-readable name
- 'shortcut' - currency symbol
- 'key' - unique identifier, e.g., 'bitcoin:BTC'
- 'support' - a dict with entries per known device
To control the size and properties of the resulting file, you can specify whether
or not you want pretty-printing and whether or not to include support data with
each coin.
You can specify which categories and which fields will be included or excluded.
You cannot specify both include and exclude at the same time. Include is "stronger"
than exclude, in that _only_ the specified fields are included.
You can also specify filters, in the form '-f field=value' (or '-F' for inverse
filter). Filter values are case-insensitive and support shell-style wildcards,
so '-f name=bit*' finds all coins whose names start with "bit" or "Bit".
"""
if exclude_tokens:
exclude_type = ("erc20",)
if include and exclude:
raise click.ClickException(
"You cannot specify --include and --exclude at the same time."
)
if include_type and exclude_type:
raise click.ClickException(
"You cannot specify --include-type and --exclude-type at the same time."
)
coins = coin_info.coin_info()
support_info = coin_info.support_info(coins.as_list())
if support:
for category in coins.values():
for coin in category:
coin["support"] = support_info[coin["key"]]
# filter types
if include_type:
coins_dict = {k: v for k, v in coins.items() if k in include_type}
else:
coins_dict = {k: v for k, v in coins.items() if k not in exclude_type}
# filter individual coins
include_filters = [f.split("=", maxsplit=1) for f in filter]
exclude_filters = [f.split("=", maxsplit=1) for f in filter_exclude]
# always exclude 'address_bytes', not encodable in JSON
exclude += ("address_bytes",)
def should_include_coin(coin):
for field, filter in include_filters:
filter = filter.lower()
if field not in coin:
return False
if not fnmatch.fnmatch(str(coin[field]).lower(), filter):
return False
for field, filter in exclude_filters:
filter = filter.lower()
if field not in coin:
continue
if fnmatch.fnmatch(str(coin[field]).lower(), filter):
return False
if device:
is_supported = support_info[coin["key"]].get(device, None)
if not is_supported:
return False
return True
def modify_coin(coin):
if include:
return {k: v for k, v in coin.items() if k in include}
else:
return {k: v for k, v in coin.items() if k not in exclude}
for key, coinlist in coins_dict.items():
coins_dict[key] = [modify_coin(c) for c in coinlist if should_include_coin(c)]
if flat_list:
output = sum(coins_dict.values(), [])
else:
output = coins_dict
with outfile:
indent = 4 if pretty else None
json.dump(output, outfile, indent=indent, sort_keys=True)
outfile.write("\n")
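# Illustrative invocations (the script name "cointool.py" is assumed):
#   python cointool.py dump -p -t -o coins.json   # pretty JSON without ERC20 tokens
#   python cointool.py dump -l -f name=bit*       # flat list of coins named bit*/Bit*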
@cli.command()
@click.option("-o", "--outfile", type=click.File(mode="w"), default="./coindefs.json")
def coindefs(outfile):
"""Generate signed coin definitions for python-trezor and others
This is currently unused but should enable us to add new coins without having to
update firmware.
"""
coins = coin_info.coin_info().bitcoin
coindefs = {}
for coin in coins:
key = coin["key"]
icon = Image.open(coin["icon"])
ser = serialize_coindef(coindef_from_dict(coin), convert_icon(icon))
sig = sign(ser)
definition = (sig + ser).hex()
coindefs[key] = definition
with outfile:
json.dump(coindefs, outfile, indent=4, sort_keys=True)
outfile.write("\n")
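# Hedged sketch of how a consumer might unpack one entry of coindefs.json,
# based only on the format written above: hex(signature + serialized protobuf),
# where the ed25519 signature is 64 bytes long.
def split_definition(definition_hex):
    raw = bytes.fromhex(definition_hex)
    return raw[:64], raw[64:]  # (signature, serialized CoinDef)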
@cli.command()
# fmt: off
@click.argument("paths", metavar="[path]...", nargs=-1)
@click.option("-o", "--outfile", type=click.File("w"), help="Alternate output file")
@click.option("-v", "--verbose", is_flag=True, help="Print rendered file names")
# fmt: on
def render(paths, outfile, verbose):
"""Generate source code from Mako templates.
For every "foo.bar.mako" filename passed, runs the template and
saves the result as "foo.bar". For every directory name
suites mode
announcement -- The description/announcement of the project
show_announcement -- True to show the announcement/description on
project page and false otherwise
is_completed -- True if the project is marked as completed and
false otherwise
completed_on -- The date/time when the project was marked as
completed (as UNIX timestamp)
"""
cache = {}
def _settle_attributes(self, attributes):
self.id = attributes['id']
self.name = attributes['name']
self.url = attributes['url']
self.suite_mode = attributes['suite_mode']
self.announcement = attributes['announcement']
self.show_announcement = attributes['show_announcement']
self.is_completed = attributes['is_completed']
self.completed_on = attributes['completed_on']
self.custom_case_fields = []
for custom_case_field in Testrail.case_fields():
if custom_case_field.present_in_project(self.id):
self.custom_case_fields.append(custom_case_field)
self.custom_result_fields = []
for custom_result_field in Testrail.result_fields():
if custom_result_field.present_in_project(self.id):
self.custom_result_fields.append(custom_result_field)
self._milestones = None
self._suites = None
@staticmethod
def get_one(project_id):
return Project(TestrailAPI.get_project(project_id))
def update(self, name=None, announcement=None, show_announcement=None,
suite_mode=None, is_completed=None):
"""
Change project parameters.
:rtype: None
"""
data = {}
if name is not None:
data['name'] = name
if announcement is not None:
data['announcement'] = announcement
if show_announcement is not None:
data['show_announcement'] = show_announcement
if suite_mode is not None:
data['suite_mode'] = suite_mode
if is_completed is not None:
data['is_completed'] = is_completed
self._settle_attributes(TestrailAPI.update_project(self.id, **data))
def delete(self):
"""
Wipe out this project.
!!! Deleting a project cannot be undone and also permanently deletes
all test suites & cases, test runs & results and everything else
that is part of the project.
"""
TestrailAPI.delete_project(self.id)
self._settle_attributes(defaultdict(lambda: None))
def configs(self):
"""
Get project configuration groups
:rtype: list of [ConfigGroup]
"""
return [ConfigGroup(c) for c in TestrailAPI.get_configs(self.id)]
def milestones(self, is_completed=None):
"""
Get list of milestones in this project.
:arg is_completed: True - to return only completed milestones.
False - only incomplete.
None - return all milestones.
:type is_completed: bool
:rtype: list of [Milestone]
"""
if self._milestones is not None:
return self._milestones
else:
self._milestones = [
Milestone(m) for m in
TestrailAPI.get_milestones(self.id, is_completed)
]
return self._milestones
def get_milestone_by_name(self, milestone_name):
"""
:type milestone_name: str
:rtype: Milestone
"""
for m in self.milestones():
if m.name == milestone_name:
return m
raise NotFound('No milestone with name: %s' % milestone_name)
def add_milestone(self, name, description='', due_on=None):
"""
Creates new milestone.
Returns newly created milestone object.
:arg name: Name of new milestone
:arg due_on: The due date of the milestone
:arg description: Description of new milestone
:type name: str
:type description: str
:type due_on: datetime.datetime
:rtype: Milestone
"""
data = {
'name': name,
'description': description
}
if due_on is not None:
data['due_on'] = int(time.mktime(
due_on.timetuple()
))
return Milestone(TestrailAPI.add_milestone(self.id, **data))
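# Hypothetical usage sketch (the project id and names are made up):
#   project = Project.get_one(1)
#   milestone = project.add_milestone('Release 1.0', description='First GA',
#                                     due_on=datetime.datetime(2020, 1, 31))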
def suites(self):
"""
Returns list of all suites in the project.
:rtype: list of [Suite]
"""
if self._suites is not None:
return self._suites
else:
self._suites = [Suite(s) for s in TestrailAPI.get_suites(self.id)]
return self._suites
def get_suite_by_name(self, suite_name):
"""
:type suite_name: str
:rtype: Suite
"""
for s in self.suites():
if s.name == suite_name:
return s
raise NotFound('No suite with name: %s' % suite_name)
def add_suite(self, name, description=''):
"""
Creates new test suite.
Returns newly created test suite object.
:type name: str
:type description: str
:rtype: Suite
"""
data = {
'name': name,
'description': description
}
return Suite(TestrailAPI.add_suite(self.id, **data))
def plans(self, milestones=None, limit=None, offset=None,
is_completed=None, created_by=None, created_after=None,
created_before=None):
"""
Returns list of test plans in the project.
:arg created_after: Only return test runs created after this date.
:arg created_before: Only return test runs created before this date.
:arg created_by: A list of creator names to filter by.
:arg is_completed: True to return completed test runs only.
False to return active test runs only.
:arg limit: Limit the result to 'limit' test runs.
:arg offset: Skip 'offset' records.
:arg milestones: A list of milestone names to filter by.
:type created_after: datetime.datetime
:type created_before: datetime.datetime
:type created_by: list of [str]
:type is_completed: bool
:type limit: int
:type offset: int
:type milestones: list of [str]
:rtype: list of [Plan]
"""
data = {
'milestone_id': milestones,
'limit': limit,
'offset': offset,
'is_completed': is_completed,
'created_by': created_by,
'created_after': created_after,
'created_before': created_before
}
if milestones is not None:
data['milestone_id'] = [
str(self.get_milestone_by_name(milestone).id)
for milestone in milestones
]
if created_by is not None:
data['created_by'] = [
str(Testrail.get_user_by_name(user).id)
for user in created_by
]
if created_after is not None:
data['created_after'] = int(time.mktime(
created_after.timetuple()
))
if created_before is not None:
data['created_before'] = int(time.mktime(
created_before.timetuple()
))
return [Plan(p) for p in TestrailAPI.get_plans(self.id, **data)]
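# Hypothetical usage sketch: completed plans for one milestone created in 2019
# (the milestone name is made up).
#   plans = project.plans(milestones=['Sprint 1'], is_completed=True,
#                         created_after=datetime.datetime(2019, 1, 1))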
def add_plan(self):
raise NotImplementedError
def runs(self, suites=None, milestones=None, limit=None, offset=None,
is_completed=None, created_by=None, created_after=None,
created_before=None):
"""
Returns list of test runs in the project. (Not those which are part
of a test plan).
:arg created_after: Only return test runs created after this date.
:arg created_before: Only return test runs created before this date.
:arg created_by: A list of creator names to filter by.
:arg is_completed: True to return completed test runs only.
False to return active test runs only.
:arg limit: Limit the result to 'limit' test runs.
:arg offset: Skip 'offset' records.
:arg milestones: A list of milestone names to filter by.
:arg suites: A list of test suite names to filter by.
:type created_after: datetime.datetime
:type created_before: datetime.datetime
:type created_by: list of [str]
:type is_completed: bool
:type limit: int
:type offset: int
:type milestones: list of [str]
:type suites: list of [str]
:rtype: list of [Run]
"""
data = {
'suite_id': suites,
'milestone_id': milestones,
'limit': limit,
'offset': offset,
'is_completed': is_completed,
'created_by': created_by,
'created_after': created_after,
'created_before': created_before
}
if suites is not None:
data['suite_id'] = [
str(self.get_suite_by_name(suite).id) for suite in suites
]
if milestones is not None:
data['milestone_id'] = [
str(self.get_milestone_by_name(milestone).id)
for milestone in milestones
]
if created_by is not None:
data['created_by'] = [
str(Testrail.get_user_by_name(user).id)
for user in created_by
]
if created_after is not None:
data['created_after'] = int(time.mktime(
created_after.timetuple()
))
if created_before is not None:
data['created_before'] = int(time.mktime(
created_before.timetuple()
))
return [Run(p) for p in TestrailAPI.get_runs(self.id,
**data)]
def add_run(self, name, suite, description='',
milestone=None, assignedto=None,
include_all=True, cases=None):
"""
Creates new test run.
Returns newly created run object.
:arg name: Name of new test run
:arg description: Description of new test run
:arg suite: Name of the suite to create the test run from
:arg milestone: Name of the milestone to link the run to
:arg assignedto: Name of the user to assign the run to
:arg include_all: if True, all cases in the suite will be included in the run
:arg cases: if include_all is False - include only these cases
:type name: str
:type description: str
:type suite: str
:type milestone: str
:type assignedto: str
:type include_all: bool
:type cases: list of [Cases]
:rtype: Run
"""
data = {
'name': name,
'suite_id': self.get_suite_by_name(suite).id,
'description': description,
}
if milestone is not None:
data['milestone_id'] = self.get_milestone_by_name(milestone).id
if assignedto is not None:
data['assignedto_id'] = Testrail.get_user_by_name(assignedto).id
if include_all:
data['include_all'] = True
else:
data['include_all'] = False
data['case_ids'] = [str(c.id) for c in cases]
return Run(TestrailAPI.add_run(self.id, **data))
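# Hypothetical usage sketch (suite, milestone and case objects are made up):
#   run = project.add_run('Smoke run', suite='Regression suite',
#                         milestone='Release 1.0', include_all=False,
#                         cases=smoke_cases)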
class ConfigGroup(object):
def __init__(self, attributes):
self.id = attributes['id']
self.name = attributes['name']
self.project_id = attributes['project_id']
self.configs = {}
for k in attributes['configs']:
self.configs[k['id']] = k['name']
class Milestone(_TestrailObject):
"""
Milestone container
Module Attributes:
project -- Project object the milestone belongs to
Testrail Attributes:
id -- The unique ID of the milestone
project_id -- The ID of the project the milestone belongs to
name -- The name of the milestone
description -- The description of the milestone
url -- The address/URL of the milestone in the user interface
due_on -- The due date/time of the milestone (as UNIX timestamp)
is_completed -- True if the milestone is marked as completed and false
otherwise
completed_on -- The date/time when the milestone was marked as completed
(as UNIX timestamp)
"""
cache = {}
def _settle_attributes(self, attributes):
self.id = attributes['id']
self.project_id = attributes['project_id']
self.name = attributes['name']
self.description = attributes['description']
self.url = attributes['url']
self.due_on_stamp = attributes['due_on']
try:
self.due_on = datetime.datetime.fromtimestamp(float(attributes['due_on']))
except TypeError:
self.due_on = None
self.is_completed = attributes['is_completed']
self.completed_on = attributes['completed_on']
@property
def project(self):
return Testrail.get_project_by_id(self.project_id)
@staticmethod
def get_one(milestone_id):
return Milestone(TestrailAPI.get_milestone(milestone_id))
def update(self, name=None, description=None, due_on=None,
is_completed=None):
"""
Change milestone parameters.
:rtype: None
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if due_on is not None:
data['due_on'] = due_on
if is_completed
#!/usr/bin/env python3
import mysql.connector as mysql
import datetime
from math import floor
from threading import RLock
from configparser import ConfigParser
import ast
class PlaylistDatabase():
'''
This database is designed to manage songs played by an
internet radio station. It stores the web address of the station,
some details about it, and the playlist of songs.
The playlist includes a link to a youtube video of the song.
'''
def _init_database_schema(self,commit=True):
'''
!!! ALL EXISTING DATA IS LOST WHEN USING THIS FUNCTION !!!
Initialize the database. This consists of:
* Dropping all relevant tables.
* Creating new empty tables.
'''
print('Dropping...')
try:
self._cur.execute('drop database PlaylistDB')
except mysql.errors.DatabaseError:
#print('No database exists.')
pass
self._cur.execute('create database PlaylistDB')
self._cur.execute('use PlaylistDB')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Artist (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
artist_name VARCHAR(256) UNIQUE NOT NULL,
PRIMARY KEY (id),
KEY (artist_name)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Album (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
album_name VARCHAR(256) NOT NULL,
artist_id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY (artist_id) REFERENCES Artist(id) ON UPDATE CASCADE,
UNIQUE(album_name,artist_id)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Track (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
track_name VARCHAR(256) NOT NULL,
youtube_link TEXT,
filesystem_link TEXT,
album_id INTEGER NOT NULL,
artist_id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY (album_id) REFERENCES Album(id) ON UPDATE CASCADE,
FOREIGN KEY (artist_id) REFERENCES Artist(id) ON UPDATE CASCADE ,
KEY (track_name),
UNIQUE(track_name,album_id,artist_id)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Station (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
station_name VARCHAR(256) NOT NULL UNIQUE,
web_address TEXT,
ignore_artists TEXT,
ignore_titles TEXT,
youtube_playlist_id TEXT,
active BOOL NOT NULL,
PRIMARY KEY (id),
KEY (station_name)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Playlist (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
track_id INTEGER NOT NULL,
station_id INTEGER NOT NULL,
play_time DATETIME NOT NULL,
PRIMARY KEY (id),
KEY (station_id),
FOREIGN KEY (track_id) REFERENCES Track(id) ON UPDATE CASCADE,
FOREIGN KEY (station_id) REFERENCES Station(id) ON UPDATE CASCADE,
UNIQUE(track_id,station_id,play_time)
)''')
if commit:
self._conn.commit()
def _get_all_stations(self):
self._cur.execute('''SELECT * from Station''')
stations = self._cur.fetchall()
return stations
def _get_station_id_from_name(self,name):
self._cur.execute('''SELECT Station.id from Station where Station.station_name = %s''',(name,))
try:
station_id = self._cur.fetchone()[0]
except TypeError:
# LookupError seems better here
raise LookupError('Station: ' + str(name) + ' could not be found.')
#print('station_id: ' + str(station_id))
return station_id
def _make_artist(self,name,get_id=True,commit=True):
'''
Create an artist in the table.
For artists we only have a name.
'''
self._cur.execute('''
INSERT IGNORE INTO Artist(artist_name)
VALUES ( %s )''', (name,)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Artist.id FROM Artist WHERE
Artist.artist_name=%s''',(name,))
return self._cur.fetchone()[0]
def _make_album(self,artist_id,album,get_id=True,commit=True):
'''
Create an album in the table.
'''
# Get the artist id
self._cur.execute('''
INSERT IGNORE INTO Album(album_name,artist_id)
VALUES ( %s, %s )''', (album,artist_id)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Album.id FROM Album WHERE
Album.album_name=%s AND Album.artist_id=%s
''',(album,artist_id))
return self._cur.fetchone()[0]
def _make_track(self,name,album_id,artist_id,yt_link='',fs_link='',get_id=True,commit=True):
'''
Given a track name, album ID, and an artist ID, make a track in the
'Track' table. Optionally a youtube URL or filesystem location can also be specified.
'''
# Use ON DUPLICATE KEY UPDATE because we may be updating an existing track
# with a new youtube or filesystem link.
self._cur.execute('''
INSERT INTO Track (track_name,youtube_link,filesystem_link,album_id,artist_id)
VALUES( %s, %s, %s, %s, %s ) ON DUPLICATE KEY UPDATE
youtube_link=VALUES(youtube_link),filesystem_link=VALUES(filesystem_link)''',
(name,yt_link,fs_link,album_id,artist_id)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Track.id FROM Track WHERE
Track.track_name=%s AND
Track.youtube_link=%s AND
Track.filesystem_link=%s AND
Track.album_id=%s AND
Track.artist_id=%s''',(name,yt_link,fs_link,album_id,artist_id))
return self._cur.fetchone()[0]
def _add_playlist_entry(self,station_id,track_id,play_time,commit=True):
'''
Given a station ID, track ID, and a play time (a string date)
create a new row in the corresponding playlist table
'''
# Plain INSERT here: rows are unique on (track_id, station_id, play_time).
self._cur.execute('''
INSERT INTO Playlist (track_id,station_id,play_time)
VALUES (%s, %s, %s)
''', (track_id,station_id,play_time)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
return self._cur.lastrowid
#
# BEGIN PUBLIC FUNCTIONS
#
def create_station(self,station_name,web_address,ignore_artists=[],ignore_titles=[],youtube_playlist_id='',get_id=True,commit=True):
'''
Create a station and associated playlist
'''
with self._lock:
# Create a new playlist to use for this station
#playlist_name = self._make_playlist(station_name)
# v2 will use a different format for this... Probably another table?
ignore_artists = str(ignore_artists)
ignore_titles = str(ignore_titles)
#print(playlist_name)
self._cur.execute('''
INSERT IGNORE INTO Station(station_name,web_address,ignore_artists,ignore_titles,youtube_playlist_id,active)
VALUES ( %s, %s, %s, %s, %s, %s )''', (station_name,web_address,ignore_artists,ignore_titles,youtube_playlist_id,'true')
)
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Station.id FROM Station WHERE
station_name=%s''',(station_name,))
return self._cur.fetchone()[0]
def add_track_to_station_playlist(self,station_name,artist,album,track,date,youtube_link='',commit = True):
'''
This public function takes a station common name
and a tuple representing the tracks data. It looks up the
playlist, creates an artist (if necessary), creates a
track (if necessary), and adds the track to the playlist
'''
with self._lock:
# This might happen. But upstream from here we should really be
# catching stuff like this
if artist == '' or track == '':
#print('Skipped')
return None
# Make a short link
youtube_link = youtube_link.replace('https://www.youtube.com/watch?v=','https://youtu.be/')
# Now that we have the data...
# Look up the station's ID
station_id = self._get_station_id_from_name(station_name)
#print('playlist_id is :'+ playlist_id)
# Make (or don't) the artist
artist_id = self._make_artist(artist,commit=commit)
# Make (or don't) the album
album_id = self._make_album(artist_id,album,commit=commit)
# Make (or don't) a track
track_id = self._make_track(track,album_id,artist_id,youtube_link,commit=commit)
# Format the date as a string accepted by MySQL's DATETIME column, so it
# can still be compared and sorted.
#date_ms = int(floor(date.microsecond/1000))
date = date.strftime('%Y-%m-%d %H:%M:%S.%f')
# Now that we have the data we can make an entry
return self._add_playlist_entry(station_id,track_id,date,commit=commit)
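# Hypothetical usage sketch (station and track values are made up; assumes a
# constructed and connected PlaylistDatabase instance `db`):
#   db.create_station('Example FM', 'http://example.fm')
#   db.add_track_to_station_playlist('Example FM', 'Some Artist', 'Some Album',
#                                    'Some Track', datetime.datetime.now(),
#                                    youtube_link='https://youtu.be/xyz')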
def get_latest_station_tracks(self,station_name,num_tracks=1):
'''
Get a number of tracks from a station. Order from newest
to oldest.
'''
with self._lock:
station_id = self._get_station_id_from_name(station_name)
self._cur.execute('''SELECT Track.track_name, Artist.artist_name, Playlist.play_time, Track.youtube_link, Album.album_name, Track.filesystem_link FROM Playlist
JOIN Artist JOIN Track JOIN Album ON
Playlist.track_id = Track.id and Track.artist_id = Artist.id and Track.album_id = Album.id WHERE Playlist.station_id = %s
ORDER BY Playlist.play_time DESC LIMIT %s''',(station_id,num_tracks))
data = self._cur.fetchall()
# The data we will send back
tracks = []
for t in data:
temp = {}
temp['name'] = t[0]
temp['artist'] = t[1]
temp['time'] = t[2]
temp['youtube'] = t[3]
temp['album'] = t[4]
temp['filesystem'] = t[5]
tracks.append(temp)
if num_tracks == 1:
return tracks[0]
else:
return tracks
def get_station_data(self,station=None):
'''
Return a list of dictionaries of the station data
'''
with self._lock:
out_list = []
for s in self._get_all_stations():
id,name,web_address,ignore_artists,ignore_titles,youtube_playlist_id,active = s
if station is not None:
if name != station:
continue
channel_dict = {}
channel_dict['site'] = web_address
exec("channel_dict['ignoreartists'] = "+ ignore_artists)
exec("channel_dict['ignoretitles'] = "+ ignore_titles)
channel_dict['name'] = name
channel_dict['playlist'] = youtube_playlist_id
channel_dict['active'] = (active == 1)
try:
track_data = self.get_latest_station_tracks(name)
channel_dict['lastartist'] = track_data['artist']
channel_dict['lastsong'] = track_data['name']
except IndexError:
channel_dict['lastartist'] = ''
channel_dict['lastsong'] = ''
out_list.append(channel_dict)
if station is not None:
return out_list[0]
else:
return out_list
def look_up_song_youtube(self,artist,album,title):
'''
Given the artist, album, and title,
Look up the song's youtube URL
'''
with self._lock:
self._cur.execute('''SELECT Track.youtube_link from Track JOIN Artist JOIN Album ON
Track.artist_id = Artist.id and Track.album_id = Album.id WHERE Track.track_name = %s and Album.album_name = %s and Artist.artist_name = %s LIMIT 1''',
(title,album,artist))
url = self._cur.fetchone()
# LookupError seems better
if url is None:
raise LookupError
else:
"""
Support for Lenco DIR150BK and other Airmusic based Internet Radios.
"""
import logging
import requests
import xmltodict
VERSION = '0.0.1'
class airmusic(object):
"""
This class contains constants and methods to implement the AirMusic API.
"""
# The KEY_... constants represent the corresponding key of the InfraRed Remote.
KEY_HOME = 1
KEY_UP = 2
KEY_DOWN = 3
KEY_LEFT = 4
KEY_RIGHT = 5
KEY_ENTER = 6
KEY_POWER = 7 # Toggle power on/off.
KEY_MUTE = 8
KEY_VOLUP = 9 # Volume up one step.
KEY_VOLDOWN = 10 # Volume down one step.
KEY_ALARMCLOCK = 11
KEY_SLEEPTIMER = 12
KEY_LANGUAGE = 13 # Open the language menu.
KEY_SCREENDIM = 14 # Toggle screen dim on/off.
KEY_CHANNELFAV = 15 # Show the favourites menu.
KEY_BUTTON0 = 17
KEY_BUTTON1 = 18
KEY_BUTTON2 = 19
KEY_BUTTON3 = 20
KEY_BUTTON4 = 21
KEY_BUTTON5 = 22
KEY_BUTTON6 = 23
KEY_BUTTON7 = 24
KEY_BUTTON8 = 25
KEY_BUTTON9 = 26
KEY_MODE = 28 # Toggle between the device modes: FM, IRadio, USB, AUX, UPNP, ...
KEY_STOP = 30 # Stop playing a song / station.
KEY_NEXT = 31 # Go to the next item.
KEY_PREV = 32 # Go to the previous item.
KEY_USB = 36 # Switch to USB mode.
KEY_INTERNETRADIO = 40 # Switch to IRadio mode.
KEY_POWERSAVING = 105 # Go to the power saving menu, item 'Turn On'.
KEY_EQ_FLAT = 106 # Select "Flat" equaliser mode.
KEY_SYSTEMMENU = 110 # Go to the system menu.
KEY_WPS = 111 # Start WPS mode.
KEY_NEXTFAV = 112 # Go to the next station in the favourites list.
SID = {1: 'Stopped',
2: 'Buffering',
6: 'Playing',
7: 'Ending',
9: 'Paused',
12: 'Reading from file',
14: 'failed to connect'}
def __init__(self, device_address, timeout=5):
"""!
Constructor of the Airmusic API class.
@param device_address holds the device IP-address or resolvable name.
@param timeout determines the maximum amount of seconds to wait for a reply from the device.
"""
self.device_address = device_address
self.timeout = timeout
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)-8s %(name)-12s %(message)s',
filename=('airmusic-debug.log'),)
self.logger = logging.getLogger("airmusic")
# Will be updated after successful call to init() command.
self.language = None
self.hotkey_fav = None
self.push_talk = None
self.play_mode = None
self.sw_update = None
def __del__(self):
"""!
@private
Finalise the communication with the device by closing the session.
"""
self.logger = None # No logging possible at termination.
self.stop()
self.send_cmd('exit')
def __repr__(self):
"""!
@private
Return a string representation of the Airmusic API instance, showing the most important variables.
"""
ret = ""
ret += "Airmusic API Ver. {}".format(VERSION)
ret += "\n address={}".format(self.device_address)
ret += "\n timeout={}".format(self.timeout)
ret += "\n language={}".format(self.language)
ret += "\n hotkey={}".format(self.hotkey_fav)
ret += "\n push_talk={}".format(self.push_talk)
ret += "\n play_mode={}".format(self.play_mode)
ret += "\n sw_update={}".format(self.sw_update)
return ret
def __str__(self):
"""!
@private
Return a string representation of the Airmusic API instance, showing the most important variables.
"""
return self.__repr__()
def send_cmd(self, cmd, port=80, params=None):
"""!
Send the command and optional parameters to the device and receive the response.
Most commands will be sent to port 80, but some might require port 8080.
There are commands that have no parameters. In that case the params parameter can be omitted.
In case a command requires additional parameters, these must be passed as a dict().
For example, the list command has the following syntax:
http://.../list?id=1&start=1&count=15
In that case, parameter cmd will be set to 'list', and parameter params will be set to
the dict(id=1, start=1, count=15).
@param cmd is the command to send.
@param port is the http port to send the command to. Default is 80.
@param params holds the command parameters (as a dict).
"""
# The parameters for the command, if any, are received in a dict() structure.
if type(params) is not dict:
params = dict()
if self.logger:
self.logger.debug("Sending: {}".format(cmd))
# Send the command to the device. The Basic Authentication values are hardcoded.
result = requests.get('http://{}:{}/{}'.format(self.device_address, port, cmd),
auth=('su3g4go6sk7', '<PASSWORD>/^'),
params=params,
timeout=self.timeout)
if self.logger:
self.logger.debug("Response: headers={}, text=\"{}\"".format(result.headers, result.text))
if result.ok:
if 'html' in result.text: # Some commands, like set_dname, return an HTML page.
return dict(result='OK')
return xmltodict.parse(make_xml(result.text))
logging.error("Error in request: {} : {}".format(result.status_code, result.reason))
return None
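# Illustrative call mirroring the docstring example (the device address is made up):
#   dev = airmusic('192.168.1.50')
#   resp = dev.send_cmd('list', params=dict(id=1, start=1, count=15))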
# ========================================================================
# Properties
# ========================================================================
# Friendly name
def get_friendly_name(self):
"""!
Return the human readable name of the device.
@note Instead of this function, use the property friendly_name.
@return the device name (string).
"""
resp = self.send_cmd('irdevice.xml')
# <root><device><friendlyName>...</friendlyName></device></root>
return resp['root']['device']['friendlyName']
def set_friendly_name(self, value):
"""!
Assign a human readable name to the device.
@note Instead of this function, use the property friendly_name.
@param value the device name (string).
"""
resp = self.send_cmd('set_dname', params=dict(name=value))
# <result>OK</result>
return resp
friendly_name = property(get_friendly_name, set_friendly_name)
# log level
def get_log_level(self):
"""!
Get the actual logging level. See the logging library for level values.
@note Instead of this function, use the property log_level.
@return the current log level.
"""
return self.logger.getEffectiveLevel()
def set_log_level(self, loglevel):
"""!
Change the logging level. See the logging library for level values.
Default is logging.INFO level.
@note Instead of this function, use the property log_level.
@param loglevel specifies the level at which output to the logger will be activated.
"""
self.logger.setLevel(loglevel)
log_level = property(get_log_level, set_log_level)
# mute
def get_mute(self):
"""!
Fetch the mute state.
@note Instead of this function, use the property mute.
@return True if the device is muted, False if not muted.
"""
resp = self.get_background_play_status()
return resp['mute'] == '1'
def set_mute(self, value):
"""!
Specify mute on or off.
The device can be muted or unmuted, while not changing the volume level set.
It returns the tags:
- vol : to indicate the current volume level.
- mute : the mute flag; 0=Off 1=On.
@note Instead of this function, use the property mute.
@param value True to mute the device, False to unmute.
@return a dict holding vol and mute.
"""
resp = self.send_cmd('setvol', params=dict(mute=1 if value else 0))
return resp['result']
mute = property(get_mute, set_mute)
# volume
def get_volume(self):
"""!
Fetch the volume level.
@note Instead of this function, use the property volume.
@return the volume level (0 .. 15).
"""
resp = self.get_background_play_status()
return resp['vol']
def set_volume(self, value):
"""!
Specify the volume level.
The volume of the device can be specified in 16 steps, 0-15.
It returns the tags:
- vol : to indicate the current volume level.
- mute : the mute flag; 0=Off 1=On.
@note Instead of this function, use the property volume.
@param value is the volume level to set (0 .. 15).
@return a dict holding vol and mute.
"""
resp = self.send_cmd('setvol', params=dict(vol=value))
return resp['result']
volume = property(get_volume, set_volume)
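# Hypothetical usage sketch of the properties above (the address is made up):
#   radio = airmusic('192.168.1.50')
#   radio.init(language='en')
#   radio.volume = 7
#   radio.mute = False
#   print(radio.friendly_name)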
# ========================================================================
# Public methods
# ========================================================================
def init(self, language='en'):
"""!
Initialize session and select the communication language.
The GUI on the device will show messages in the selected language.
The same is valid for the content of specific tags.
It returns the value of several system parameters, being:
- id (The ID for the main menu),
- version (The system version),
- wifi_set_url (URL to start scanning for APs, but its IP address is wrong!),
- ptver (date part of the version),
- hotkey_fav (The key of the chosen station in the hotkey list),
- push_talk (?),
- leave_msg (?),
- leave_msg_ios (?),
- M7_SUPPORT (Flag to indicate if this device has support for the M7 chipset),
- SMS_SUPPORT (Flag to indicate if SMS is supported),
- MKEY_SUPPORT (?),
- UART_CD (?),
- PlayMode (Represents the current play mode, eg FM, IRadio, ...),
- SWUpdate (If there is an update available, most of the time value NO).
@param language holds the communication language, eg. en, fr, de, nl, ...
@return a dict holding the system parameters and values.
"""
resp = self.send_cmd('init', params=dict(language=language))
# <result><id>1</id><lang>en</lang> ... </result>
result = resp['result']
self.language = result['lang']
self.hotkey_fav = result['hotkey_fav']
self.push_talk = result['push_talk']
self.play_mode = result['PlayMode']
self.sw_update = result['SWUpdate']
return result
def get_background_play_status(self):
"""!
Get the status info
an invalid generation
number.
"""
generation = request.args.get(version_field_name)
if generation is None:
return self.get_latest()
version = self.revisions.get(int(generation))
if version is None:
raise error_response.ErrorResponse(
'Precondition Failed: generation %s not found' % generation)
return version
def del_revision(self, request):
"""Delete a version of a fake GCS Blob.
:param request:flask.Request the contents of the HTTP request.
:return: True if the object entry in the Bucket should be deleted.
:rtype: bool
"""
generation = request.args.get('generation') or self.current_generation
if generation is None:
return True
self.revisions.pop(int(generation))
if len(self.revisions) == 0:
self.current_generation = None
return True
self.current_generation = sorted(self.revisions.keys())[-1]
return False
@classmethod
def _remove_non_writable_keys(cls, metadata):
"""Remove the keys from metadata (an update or patch) that are not
writable.
Both `Objects: patch` and `Objects: update` either ignore non-writable
keys or return 400 if the key does not match the current value. In
the testbench we simply always ignore them, to make life easier.
:param metadata:dict a dictionary representing a patch or
update to the metadata.
:return metadata with any non-writable keys removed.
:rtype: dict
"""
writeable_keys = {
'acl', 'cacheControl', 'contentDisposition', 'contentEncoding',
'contentLanguage', 'contentType', 'eventBasedHold', 'metadata',
'temporaryHold', 'storageClass'
}
# Copy the keys first: popping while iterating over the dict itself would
# raise a RuntimeError in Python 3.
for key in list(metadata.keys()):
if key not in writeable_keys:
metadata.pop(key, None)
return metadata
def update_revision(self, request):
"""Update the metadata of particular object revision or raise.
:param request:flask.Request
:return: the object revision updated revision.
:rtype: GcsObjectVersion
:raises:ErrorResponse if the request contains an invalid generation
number.
"""
generation = request.args.get('generation')
if generation is None:
version = self.get_latest()
else:
version = self.revisions.get(int(generation))
if version is None:
raise error_response.ErrorResponse(
'Precondition Failed: generation %s not found' %
generation)
metadata = GcsObject._remove_non_writable_keys(json.loads(request.data))
version.update_from_metadata(metadata)
return version
def patch_revision(self, request):
"""Patch the metadata of particular object revision or raise.
:param request:flask.Request
:return: the object revision.
:rtype:GcsObjectRevision
:raises:ErrorResponse if the request contains an invalid generation
number.
"""
generation = request.args.get('generation')
if generation is None:
version = self.get_latest()
else:
version = self.revisions.get(int(generation))
if version is None:
raise error_response.ErrorResponse(
'Precondition Failed: generation %s not found' %
generation)
patch = GcsObject._remove_non_writable_keys(json.loads(request.data))
patched = testbench_utils.json_api_patch(
version.metadata, patch, recurse_on={'metadata'})
patched['metageneration'] = patched.get('metageneration', 0) + 1
version.metadata = patched
return version
def get_revision_by_generation(self, generation):
"""Get object revision by generation or None if not found.
:param generation:int
:return: the object revision by generation or None.
:rtype:GcsObjectRevision
"""
return self.revisions.get(generation, None)
def get_latest(self):
return self.revisions.get(self.current_generation, None)
def check_preconditions_by_value(
self, generation_match, generation_not_match, metageneration_match,
metageneration_not_match):
"""Verify that the given precondition values are met."""
current_generation = self.current_generation or 0
if (generation_match is not None
and int(generation_match) != current_generation):
raise error_response.ErrorResponse(
'Precondition Failed', status_code=412)
# This object does not exist (yet), testing in this case is special.
if (generation_not_match is not None
and int(generation_not_match) == current_generation):
raise error_response.ErrorResponse(
'Precondition Failed', status_code=412)
if self.current_generation is None:
if (metageneration_match is not None
or metageneration_not_match is not None):
raise error_response.ErrorResponse(
'Precondition Failed', status_code=412)
return
current = self.revisions.get(current_generation)
if current is None:
raise error_response.ErrorResponse(
'Object not found', status_code=404)
metageneration = current.metadata.get('metageneration')
if (metageneration_not_match is not None
and int(metageneration_not_match) == metageneration):
raise error_response.ErrorResponse(
'Precondition Failed', status_code=412)
if (metageneration_match is not None
and int(metageneration_match) != metageneration):
raise error_response.ErrorResponse(
'Precondition Failed', status_code=412)
def check_preconditions(
self,
request,
if_generation_match='ifGenerationMatch',
if_generation_not_match='ifGenerationNotMatch',
if_metageneration_match='ifMetagenerationMatch',
if_metageneration_not_match='ifMetagenerationNotMatch'):
"""Verify that the preconditions in request are met.
:param request:flask.Request the http request.
:param if_generation_match:str the name of the generation match
parameter name, typically 'ifGenerationMatch', but sometimes
'ifSourceGenerationMatch'.
:param if_generation_not_match:str the name of the generation not-match
parameter name, typically 'ifGenerationNotMatch', but sometimes
'ifSourceGenerationNotMatch'.
:param if_metageneration_match:str the name of the metageneration match
parameter name, typically 'ifMetagenerationMatch', but sometimes
'ifSourceMetagenerationMatch'.
:param if_metageneration_not_match:str the name of the metageneration
not-match parameter name, typically 'ifMetagenerationNotMatch', but
sometimes 'ifSourceMetagenerationNotMatch'.
:rtype:NoneType
"""
generation_match = request.args.get(if_generation_match)
generation_not_match = request.args.get(if_generation_not_match)
metageneration_match = request.args.get(if_metageneration_match)
metageneration_not_match = request.args.get(
if_metageneration_not_match)
self.check_preconditions_by_value(
generation_match, generation_not_match, metageneration_match,
metageneration_not_match)
def _insert_revision(self, revision):
"""Insert a new revision that has been initialized and checked.
:param revision: GcsObjectVersion the new revision to insert.
:rtype:NoneType
"""
update = {self.generation_generator: revision}
bucket = testbench_utils.lookup_bucket(self.bucket_name)
if not bucket.versioning_enabled():
self.revisions = update
else:
self.revisions.update(update)
self.current_generation = self.generation_generator
def insert(self, gcs_url, request):
"""Insert a new revision based on the give flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
media = testbench_utils.extract_media(request)
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url, self.bucket_name, self.name, self.generation_generator,
request, media)
meta = revision.metadata.setdefault('metadata', {})
meta['x_testbench_upload'] = 'simple'
self._insert_revision(revision)
return revision
def insert_multipart(self, gcs_url, request, resource, media_headers, media_body):
"""Insert a new revision based on the give flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param resource:dict JSON resource with object metadata.
:param media_headers:dict media headers in a multi-part upload.
:param media_body:str object data in a multi-part upload.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
# There are two ways to specify the content-type, the 'content-type'
# header and the resource['contentType'] field. They must be consistent,
# and the service generates an error when they are not.
if (resource.get('contentType') is not None and
media_headers.get('content-type') is not None and
resource.get('contentType') != media_headers.get('content-type')):
raise error_response.ErrorResponse(
('Content-Type specified in the upload (%s) does not match '
'contentType specified in the metadata (%s).') % (
media_headers.get('content-type'),
resource.get('contentType')),
status_code=400)
# Set the contentType in the resource from the header. Note that if both
# are set they have the same value.
resource.setdefault('contentType', media_headers.get('content-type'))
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url, self.bucket_name, self.name, self.generation_generator,
request, media_body)
meta = revision.metadata.setdefault('metadata', {})
meta['x_testbench_upload'] = 'multipart'
meta['x_testbench_md5'] = resource.get('md5Hash', '')
meta['x_testbench_crc32c'] = resource.get('crc32c', '')
# Apply any overrides from the resource object part.
revision.update_from_metadata(resource)
self._insert_revision(revision)
return revision
def insert_resumable(self, gcs_url, request, media, resource):
"""Implement the final insert for a resumable upload.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param media:str the media for the object.
:param resource:dict the metadata for the object.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url, self.bucket_name, self.name, self.generation_generator,
request, media)
meta = revision.metadata.setdefault('metadata', {})
meta['x_testbench_upload'] = 'resumable'
meta['x_testbench_md5'] = resource.get('md5Hash', '')
meta['x_testbench_crc32c'] = resource.get('crc32c', '')
# Apply any overrides from the resource object part.
revision.update_from_metadata(resource)
self._insert_revision(revision)
return revision
def insert_xml(self, gcs_url, request):
"""Implement the insert operation using the XML API.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
media = testbench_utils.extract_media(request)
self.generation_generator += 1
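# The XML API's x-goog-hash header can carry several checksums in one value,
# e.g. "crc32c=<base64>,md5=<base64>"; split on commas and pick each one out.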
goog_hash = request.headers.get('x-goog-hash')
md5hash = None
crc32c = None
if goog_hash is not None:
for hash in goog_hash.split(','):
if hash.startswith('md5='):
md5hash = hash[4:]
if hash.startswith('crc32c='):
crc32c = hash[7:]
revision = GcsObjectVersion(
gcs_url, self.bucket_name, self.name, self.generation_generator,
request, media)
meta = revision.metadata.setdefault('metadata', {})
meta['x_testbench_upload'] = 'xml'
if md5hash is not None:
meta['x_testbench_md5'] = md5hash
revision.update_from_metadata({
'md5Hash': md5hash,
})
if crc32c is not None:
meta['x_testbench_crc32c'] = crc32c
revision.update_from_metadata({
'crc32c': crc32c,
})
self._insert_revision(revision)
return revision
def copy_from(self, gcs_url, request, source_revision):
"""Insert a new revision based on the give flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param source_revision:GcsObjectVersion the source object version to
copy from.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
self.generation_generator += 1
source_revision.validate_encryption_for_read(request)
revision = GcsObjectVersion(
gcs_url, self.bucket_name, self.name, self.generation_generator,
request, source_revision.media)
revision.reset_predefined_acl(
request.args.get('destinationPredefinedAcl'))
metadata = json.loads(request.data)
revision.update_from_metadata(metadata)
self._insert_revision(revision)
return revision
def compose_from(self, gcs_url, request, composed_media):
"""Compose a new revision based on the give flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param composed_media:str contents of the composed object
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url, self.bucket_name, self.name, self.generation_generator,
request, composed_media)
revision.reset_predefined_acl(
request.args.get('destinationPredefinedAcl'))
payload = json.loads(request.data)
if payload.get('destination') is not None:
revision.update_from_metadata(payload.get('destination'))
# The server often discards the MD5 Hash when composing objects, we can
easily maintain them in the testbench,
# coding=utf-8
""" Google Earth Engine Landsat Collections """
from .visualization import *
from .datasets import OpticalSatellite, ImageCollection
from .bands import OpticalBand, BitBand, ClassificationBand, ExpressionBand,\
Precisions
from .helpers import TODAY
from functools import partial
from . import register
from .masks import Mask
import geetools
START = {1: '1972-07-23', 2: '1975-01-22', 3: '1978-03-05',
4: '1982-07-16', 5: '1984-01-01', 7: '1999-01-01',
8: '2013-04-11'}
END = {1: '1978-01-07', 2: '1982-02-26', 3: '1983-03-31',
4: '1993-12-14', 5: '2012-05-05', 7: TODAY, 8: TODAY}
IDS = [
'LANDSAT/LM01/C01/T2',
'LANDSAT/LM02/C01/T2',
'LANDSAT/LM03/C01/T2',
'LANDSAT/LM04/C01/T2',
'LANDSAT/LM05/C01/T1',
'LANDSAT/LM05/C01/T2',
'LANDSAT/LT04/C01/T1', 'LANDSAT/LT04/C01/T1_TOA', 'LANDSAT/LT04/C01/T1_SR',
'LANDSAT/LT04/C01/T2', 'LANDSAT/LT04/C01/T2_TOA', 'LANDSAT/LT04/C01/T2_SR',
'LANDSAT/LT05/C01/T1', 'LANDSAT/LT05/C01/T1_TOA', 'LANDSAT/LT05/C01/T1_SR',
'LANDSAT/LT05/C01/T2', 'LANDSAT/LT05/C01/T2_TOA', 'LANDSAT/LT05/C01/T2_SR',
'LANDSAT/LE07/C01/T1', 'LANDSAT/LE07/C01/T1_TOA', 'LANDSAT/LE07/C01/T1_SR',
'LANDSAT/LE07/C01/T2', 'LANDSAT/LE07/C01/T2_TOA', 'LANDSAT/LE07/C01/T2_SR',
'LANDSAT/LC08/C01/T1', 'LANDSAT/LC08/C01/T1_TOA', 'LANDSAT/LC08/C01/T1_SR',
'LANDSAT/LC08/C01/T2', 'LANDSAT/LC08/C01/T2_TOA', 'LANDSAT/LC08/C01/T2_SR',
]
class Landsat(OpticalSatellite, ImageCollection):
""" Landsat Collection """
INFO = """ID: {id}
Short Name: {short_name}
Spacecraft: {spacecraft}
Number: {number}
Sensor: {sensor}
Process: {process}
Start Date: {start_date}
End Date: {end_date}
Cloud Cover: {cloud_cover}
Tier: {tier}
Bands: {bands}
Masks: {masks}
visualizers: {visualizers}
"""
spacecraft = 'LANDSAT'
cloud_cover = 'CLOUD_COVER'
number = None
tier = None
process = None
sensor = None
def __init__(self, **kwargs):
super(Landsat, self).__init__(**kwargs)
class Tier1:
tier = 1
class Tier2:
tier = 2
class RAW:
_extra = dict(precision='uint8', scale=1/255)
process = 'RAW'
class TOA:
_extra = dict(precision='float')
process = 'TOA'
def atm_op_decoder(image):
scale = 0.001
image = image.multiply(scale)
clear = image.lt(0.1).rename('clear')
hazy= image.gt(0.3).rename('hazy')
average = image.gte(0.1).And(image.lte(0.3)).rename('average')
return geetools.tools.image.mixBands([clear, hazy, average])
class SR:
_extra = dict(scale=0.0001, precision=Precisions.int16)
process = 'SR'
atm_op = ClassificationBand(
name='sr_atmos_opacity',
alias='atmos_opacity',
precision=Precisions.int16,
resolution=30,
classes= dict(
clear= 'value<0.1',
average= '0.1>=value<=0.3',
hazy= 'value>0.3'
),
decoder=atm_op_decoder,
positives=['clear', 'average'],
negatives=['hazy']
)
atm_op_vis = Visualization('atmos_opacity', [atm_op],
0, 300, ['green', 'red'])
sr_cloud_qa = BitBand(
name='sr_cloud_qa',
alias='cloud_qa',
precision=Precisions.uint8,
resolution=30,
bits={
'0': {1:'ddv'},
'1': {1:'cloud'},
'2': {1:'shadow'},
'3': {1:'adjacent'},
'4': {1:'snow'},
'5': {1:'water'}
},
negatives=['ddv', 'cloud', 'shadow', 'adjacent', 'snow', 'water']
)
pixel_qa = BitBand(
name='pixel_qa',
alias='pixel_qa',
precision=Precisions.uint16,
resolution=30,
bits={'1': {1:'clear'}, '2': {1:'water'},
'3': {1:'shadow'}, '4': {1:'snow'},
'5': {1:'cloud'},
'6-7':{3:'high_confidence_cloud'},
'8-9':{3:'high_confidence_cirrus'}
},
positives=['clear'],
negatives=['water', 'shadow', 'snow', 'cloud', 'high_confidence_cloud',
'high_confidence_cirrus']
)
radsat_qa = BitBand(
name='radsat_qa',
alias='radsat_qa',
precision=Precisions.uint8,
resolution=30,
bits={
1: {1:'B1_saturated'},
2: {1:'B2_saturated'},
3: {1:'B3_saturated'},
4: {1:'B4_saturated'},
5: {1:'B5_saturated'},
6: {1:'B6_saturated'},
7: {1:'B7_saturated'},
},
negatives=['B1_saturated', 'B2_saturated', 'B3_saturated',
'B4_saturated', 'B5_saturated', 'B6_saturated',
'B7_saturated']
)
class MSS:
sensor = 'MSS'
green = partial(OpticalBand, alias='green', resolution=60,
units='DN', wavelength=(0.5, 0.6))
red = partial(OpticalBand, alias='red', resolution=60,
units='DN', wavelength=(0.6, 0.7))
nir = partial(OpticalBand, alias='nir', resolution=60,
units='DN', wavelength=(0.7, 0.8))
nir2 = partial(OpticalBand, alias='nir2', resolution=30,
units='DN', wavelength=(0.8, 1.1))
bqa = BitBand(
name='BQA',
alias='bqa',
precision=Precisions.uint16,
resolution=60,
bits={'4': {1: 'cloud'}},
negatives=['cloud']
)
masks = (Mask.fromBand('BQA', bqa),)
class TM:
sensor = 'TM'
blue = partial(OpticalBand, 'B1', 'blue', resolution=30,
wavelength=(0.45, 0.52))
green = partial(OpticalBand, 'B2', 'green', resolution=30,
wavelength=(0.52, 0.6))
red = partial(OpticalBand, 'B3', 'red', resolution=30,
wavelength=(0.63, 0.69))
nir = partial(OpticalBand, 'B4', 'nir', resolution=30,
wavelength=(0.76, 0.9))
swir = partial(OpticalBand, 'B5', 'swir', resolution=30,
wavelength=(1.55, 1.75))
thermal = partial(OpticalBand, 'B6', 'thermal', units='Kelvin',
resolution=30, wavelength=(10.4, 12.5))
swir2 = partial(OpticalBand, 'B7', 'swir2', resolution=30,
wavelength=(2.08, 2.35))
bqa = BitBand(
name='BQA', alias='bqa',
precision=Precisions.uint16, resolution=30,
bits= {
'4': {1: 'cloud'},
'5-6': {3: 'high_confidence_cloud'},
'7-8': {3: 'shadow'},
'9-10': {3: 'snow'}
}
)
masks = (Mask.fromBand('BQA', bqa),)
class ETM:
sensor = 'ETM+'
blue = TM.blue
green = TM.green
red = TM.red
nir = TM.nir
swir = TM.swir
thermal = TM.thermal
thermal_vcid_1 = partial(OpticalBand, 'B6_VCID_1', 'B6_vcid_1',
units='Kelvin', resolution=30,
wavelength=(10.4, 12.5))
thermal_vcid_2 = partial(OpticalBand, 'B6_VCID_2', 'B6_vcid_2',
units='Kelvin', resolution=30,
wavelength=(10.4, 12.5))
swir2 = TM.swir2
bqa = TM.bqa
masks = (Mask.fromBand('BQA', bqa),)
class OLI:
sensor = 'OLI'
aerosol = partial(OpticalBand, 'B1', 'coastal_aerosol',
resolution=30, wavelength=(0.43, 0.45))
blue = partial(OpticalBand, 'B2', 'blue',
resolution=30, wavelength=(0.45, 0.51))
green = partial(OpticalBand, 'B3', 'green',
resolution=30, wavelength=(0.53, 0.59))
red = partial(OpticalBand, 'B4', 'red',
resolution=30, wavelength=(0.64, 0.67))
nir = partial(OpticalBand, 'B5', 'nir',
resolution=30, wavelength=(0.85, 0.88))
swir = partial(OpticalBand, 'B6', 'swir',
resolution=30, wavelength=(1.57, 1.65))
swir2 = partial(OpticalBand, 'B7', 'swir2',
resolution=30, wavelength=(2.11, 2.29))
pan = partial(OpticalBand, 'B8', 'pan',
resolution=15, wavelength=(0.52, 0.9))
cirrus = partial(OpticalBand, 'B9', 'cirrus',
resolution=15, wavelength=(1.36, 1.38))
thermal = partial(OpticalBand, 'B10', 'thermal',
resolution=30, scale=0.1, wavelength=(10.60, 11.19))
thermal2 = partial(OpticalBand, 'B11', 'thermal2',
resolution=30, scale=0.1, wavelength=(11.50, 12.51))
bqa = BitBand(
name='BQA', alias='bqa',
precision=Precisions.uint16, resolution=30,
bits= {
'4': {1: 'cloud'},
'5-6': {3: 'high_confidence_cloud'},
'7-8': {3: 'shadow'},
'9-10': {3: 'snow'},
'11-12': {3: 'cirrus'}
}
)
class RAW:
_extra = dict(precision=Precisions.uint16)
class SR:
aerosol = BitBand('sr_aerosol', 'sr_aerosol',
precision=Precisions.uint8,
bits= {
'1': {1: 'aerosol_valid'},
'2': {1: 'aerosol_interpolated'},
'3': {1: 'water'},
'6-7': {0: 'climatology', 1: 'low',
2:'medium', 3:'high'}
})
radsat_qa = BitBand(
name='radsat_qa',
alias='radsat_qa',
precision=Precisions.uint16,
resolution=30,
bits={
1: {1:'B1_saturated'},
2: {1:'B2_saturated'},
3: {1:'B3_saturated'},
4: {1:'B4_saturated'},
5: {1:'B5_saturated'},
6: {1:'B6_saturated'},
7: {1:'B7_saturated'},
9: {1:'B9_saturated'},
10: {1:'B10_saturated'},
11: {1:'B11_saturated'}
},
negatives=['B1_saturated', 'B2_saturated', 'B3_saturated',
'B4_saturated', 'B5_saturated', 'B6_saturated',
'B7_saturated', 'B9_saturated', 'B10_saturated',
'B11_saturated']
)
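# Note on the class layout below (an observation added here, not original
# commentary): the sensor mix-ins (MSS, TM, ETM, OLI) expose band factories
# built with partial(), the product mix-ins (RAW, TOA, SR) carry extra
# keyword overrides such as precision or scale, and each registered
# collection combines a Tier, a product and a sensor, e.g.
# class Landsat1RAW(Tier1, RAW, Landsat1) instantiates the factories as
# MSS.green(name='B4', **RAW._extra).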
class Landsat1(MSS, Landsat):
""" Landsat 1 """
number = 1
start_date = '1972-07-26'
end_date = '1978-01-06'
@register
class Landsat1RAW(Tier1, RAW, Landsat1):
""" Landsat 1 Tier 1 """
id = 'LANDSAT/LM01/C01/T1'
short_name = 'L1RAW'
green = MSS.green(name='B4', **RAW._extra)
red = MSS.red(name='B5', **RAW._extra)
nir = MSS.nir(name='B6', **RAW._extra)
nir2 = MSS.nir2(name='B7', **RAW._extra)
bands = (green, red, nir, nir2, MSS.bqa)
visualizers = Visualizers(
FalseColor = Visualization.falseColor([nir, red, green]),
)
ndvi = ExpressionBand('NDVI', 'ndvi', '(nir-red)/(nir+red)',
[nir, red], precision='float')
extra_bands = (ndvi,)
def __init__(self, **kwargs):
super(Landsat1RAW, self).__init__(**kwargs)
@register
class Landsat1RAWT2(Tier2, Landsat1RAW):
""" Landsat 1 Tier 2 """
id = 'LANDSAT/LM01/C01/T2'
short_name = 'L1RAWT2'
def __init__(self, **kwargs):
super(Landsat1RAWT2, self).__init__(**kwargs)
class Landsat2(MSS, Landsat):
number = 2
start_date = '1975-01-31'
end_date = '1982-02-03'
@register
class Landsat2RAW(Tier1, RAW, Landsat2):
""" Landsat 2 Tier 1 """
id = 'LANDSAT/LM02/C01/T1'
short_name = 'L2RAW'
bands = (Landsat1RAW.green, Landsat1RAW.red, Landsat1RAW.nir, Landsat1RAW.nir2,
MSS.bqa)
visualizers = Landsat1RAW.visualizers
extra_bands = Landsat1RAW.extra_bands
def __init__(self, **kwargs):
super(Landsat2RAW, self).__init__(**kwargs)
@register
class Landsat2RAWT2(Tier2, Landsat2RAW):
id = 'LANDSAT/LM02/C01/T2'
short_name = 'L2RAWT2'
def __init__(self, **kwargs):
super(Landsat2RAWT2, self).__init__(**kwargs)
class Landsat3(MSS, Landsat):
number = 3
start_date = '1978-06-03'
end_date = '1983-02-23'
@register
class Landsat3RAW(Tier1, RAW, Landsat3):
id = 'LANDSAT/LM03/C01/T1'
short_name = 'L3RAW'
bands = (Landsat1RAW.green, Landsat1RAW.red, Landsat1RAW.nir, Landsat1RAW.nir2,
MSS.bqa)
visualizers = Landsat1RAW.visualizers
extra_bands = Landsat1RAW.extra_bands
def __init__(self, **kwargs):
super(Landsat3RAW, self).__init__(**kwargs)
@register
class Landsat3RAWT2(Tier2, Landsat3RAW):
id = 'LANDSAT/LM03/C01/T2'
short_name = 'L3RAWT2'
def __init__(self, **kwargs):
super(Landsat3RAWT2, self).__init__(**kwargs)
class Landsat4MSS(MSS, Landsat):
number = 4
start_date = '1982-08-14'
end_date = '1992-08-28'
@register
class Landsat4MSSRAW(Tier1, RAW, Landsat4MSS):
""" Landsat 4 MSS """
id = 'LANDSAT/LM04/C01/T1'
short_name = 'L4MSSRAW'
green = MSS.green(name='B1', **RAW._extra)
red = MSS.red(name='B2', **RAW._extra)
nir = MSS.nir(name='B3', **RAW._extra)
nir2 = MSS.nir2(name='B4', **RAW._extra)
bands = (green, red, nir, nir2, MSS.bqa)
visualizers = Visualizers(
FalseColor = Visualization.falseColor([nir, red, green]),
)
ndvi = ExpressionBand('NDVI', 'ndvi', '(nir-red)/(nir+red)',
[nir, red], precision='float')
extra_bands = (ndvi,)
def __init__(self, **kwargs):
super(Landsat4MSSRAW, self).__init__(**kwargs)
@register
class Landsat4MSSRAWT2(Tier2, Landsat4MSSRAW):
""" Landsat 4 MSS """
id = 'LANDSAT/LM04/C01/T2'
short_name = 'L4MSSRAWT2'
def __init__(self, **kwargs):
super(Landsat4MSSRAWT2, self).__init__(**kwargs)
class Landsat4TM(TM, Landsat):
""" Landsat 4 TM Raw Tier 1 """
number = 4
start_date = '1982-08-22'
end_date = '1993-11-18'
@register
class Landsat4RAW(Tier1, RAW, Landsat4TM):
id = 'LANDSAT/LT04/C01/T1'
short_name = 'L4RAW'
blue = TM.blue(**RAW._extra)
green = TM.green(**RAW._extra)
red = TM.red(**RAW._extra)
nir = TM.nir(**RAW._extra)
swir = TM.swir(**RAW._extra)
thermal = TM.thermal(**RAW._extra)
swir2 = TM.swir2(**RAW._extra)
bands = (blue, green, red, nir, swir, thermal, swir2, TM.bqa)
visualizers = Visualizers(
TrueColor = Visualization.trueColor([red, green, blue]),
FalseColor = Visualization.falseColor([nir, red, green]),
NSR = Visualization.NSR([nir, swir, red])
)
masks = (Mask.fromBand('BQA', TM.bqa),)
ndvi = ExpressionBand('NDVI', 'ndvi', '(nir-red)/(nir+red)',
[nir, red], precision='float')
nbr = ExpressionBand('NBR', 'nbr', '(nir-swir)/(nir+swir)',
[nir, swir], precision='float')
extra_bands = (ndvi, nbr)
def __init__(self, **kwargs):
super(Landsat4RAW, self).__init__(**kwargs)
@register
class Landsat4RAWT2(Tier2, Landsat4RAW):
id = 'LANDSAT/LT04/C01/T2'
short_name = 'L4RAWT2'
def __init__(self, **kwargs):
super(Landsat4RAWT2, self).__init__(**kwargs)
@register
class Landsat4TOA(Tier1, TOA, Landsat4TM):
id = 'LANDSAT/LT04/C01/T1_TOA'
short_name = 'L4TOA'
blue = TM.blue(**TOA._extra)
green = TM.green(**TOA._extra)
red = TM.red(**TOA._extra)
nir = TM.nir(**TOA._extra)
swir = TM.swir(**TOA._extra)
thermal = TM.thermal(**TOA._extra)
swir2 = TM.swir2(**TOA._extra)
bands = (blue, green, red, nir, swir, thermal, swir2, TM.bqa)
visualizers = Visualizers(
TrueColor = Visualization.trueColor([red, green, blue]),
FalseColor = Visualization.falseColor([nir, red, green]),
NSR = Visualization.NSR([nir, swir, red])
)
masks = (Mask.fromBand('BQA', TM.bqa),)
ndvi = ExpressionBand('NDVI', 'ndvi', '(nir-red)/(nir+red)',
[nir, red], precision='float')
nbr = ExpressionBand('NBR', 'nbr', '(nir-swir)/(nir+swir)',
[nir, swir], precision='float')
extra_bands = (ndvi, nbr)
def __init__(self, **kwargs):
super(Landsat4TOA, self).__init__(**kwargs)
@register
class Landsat4TOAT2(Tier2, Landsat4TOA):
id = 'LANDSAT/LT04/C01/T2_TOA'
short_name = 'L4TOAT2'
def __init__(self, **kwargs):
super(Landsat4TOAT2, self).__init__(**kwargs)
@register
class Landsat4SR(Tier1, SR, Landsat4TM):
id = 'LANDSAT/LT04/C01/T1_SR'
short_name = 'L4SR'
blue = TM.blue(**SR._extra)
green = TM.green(**SR._extra)
red = TM.red(**SR._extra)
nir = TM.nir(**SR._extra)
swir = TM.swir(**SR._extra)
thermal = TM.thermal(**SR._extra)
swir2 = TM.swir2(**SR._extra)
bands = (blue, green, red, nir, swir, thermal, swir2, SR.atm_op,
SR.sr_cloud_qa, SR.pixel_qa, SR.radsat_qa)
visualizers = Visualizers(
TrueColor = Visualization.trueColor([red, green, blue]),
FalseColor = Visualization.falseColor([nir, red, green]),
NSR = Visualization.NSR([nir, swir, red]),
AtmosphericOpacity = SR.atm_op_vis
)
masks = (Mask.fromBand('pixel_qa', SR.pixel_qa),
Mask.fromBand('cloud_qa', | |
show version
Cisco IOS Software, 901 Software (ASR901-UNIVERSALK9-M), Version 15.6(2)SP4, RELEASE SOFTWARE (fc3)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2018 by Cisco Systems, Inc.
Compiled Mon 19-Mar-18 16:39 by prod_rel_team
ROM: System Bootstrap, Version 15.6(2r)SP4, RELEASE SOFTWARE (fc1)
LAB-ASR901T uptime is 26 weeks, 21 hours, 26 minutes
System returned to ROM by reload at 15:57:52 CDT Mon Sep 24 2018
System restarted at 15:59:27 CDT Mon Sep 24 2018
System image file is "flash:asr901-universalk9-mz.156-2.SP4.bin"
Last reload type: Normal Reload
Last reload reason: Reload Command
This product contains cryptographic features and is subject to United
States and local country laws governing import, export, transfer and
use. Delivery of Cisco cryptographic products does not imply
third-party authority to import, export, distribute or use encryption.
Importers, exporters, distributors and users are responsible for
compliance with U.S. and local country laws. By using this product you
agree to comply with applicable laws and regulations. If you are unable
to comply with U.S. and local laws, return this product immediately.
A summary of U.S. laws governing Cisco cryptographic products may be found at:
http://www.cisco.com/wwl/export/crypto/tool/stqrg.html
If you require further assistance please contact us by sending email to
<EMAIL>.
License Level: AdvancedMetroIPAccess
License Type: Smart License
Next reload license Level: AdvancedMetroIPAccess
Cisco A901-6CZ-FT-D (P2020) processor (revision 1.0) with 393216K/131072K bytes of memory.
Processor board ID CAT1733U070
P2020 CPU at 800MHz, E500v2 core, 512KB L2 Cache
1 External Alarm interface
1 FastEthernet interface
12 Gigabit Ethernet interfaces
2 Ten Gigabit Ethernet interfaces
1 terminal line
8 Channelized T1 ports
256K bytes of non-volatile configuration memory.
98304K bytes of processor board System flash (Read/Write)
Configuration register is 0x2102
'''}
golden_output_c4507 = {'execute.return_value': '''
Cisco IOS Software, IOS-XE Software, Catalyst 4500 L3 Switch Software (cat4500e-UNIVERSALK9-M), Version 03.03.02.SG RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2012 by Cisco Systems, Inc.
Compiled Tue 23-Oct-12 23:51 by prod_rel_team
ROM: 15.0(1r)SG5
switchname uptime is 6 years, 2 weeks, 13 hours, 31 minutes
Uptime for this control processor is 6 years, 2 weeks, 13 hours, 33 minutes
System returned to ROM by reload
System restarted at 09:57:20 GMT Tue Oct 15 2013
Running default software
Jawa Revision 7, Snowtrooper Revision 0x0.0x1C
Last reload reason: Reload command
This product contains cryptographic features and is subject to United
States and local country laws governing import, export, transfer and
use. Delivery of Cisco cryptographic products does not imply
third-party authority to import, export, distribute or use encryption.
Importers, exporters, distributors and users are responsible for
compliance with U.S. and local country laws. By using this product you
agree to comply with applicable laws and regulations. If you are unable
to comply with U.S. and local laws, return this product immediately.
A summary of U.S. laws governing Cisco cryptographic products may be found at:
http://www.cisco.com/wwl/export/crypto/tool/stqrg.html
If you require further assistance please contact us by sending email to
<EMAIL>.
License Information for 'WS-X45-SUP7-E'
License Level: entservices Type: Permanent
Next reboot license Level: entservices
cisco WS-C4507R+E (MPC8572) processor (revision 10) with 2097152K/20480K bytes of memory.
Processor board ID FXS1729E2TD
MPC8572 CPU at 1.5GHz, Supervisor 7
Last reset from Reload
9 Virtual Ethernet interfaces
240 Gigabit Ethernet interfaces
4 Ten Gigabit Ethernet interfaces
511K bytes of non-volatile configuration memory.
Configuration register is 0x2101 (will be 0x2102 at next reload)
'''}
golden_parsed_output_c4507 = {
'version': {
'chassis': 'WS-C4507R+E',
'chassis_sn': 'FXS1729E2TD',
'compiled_by': 'prod_rel_team',
'compiled_date': 'Tue 23-Oct-12 23:51',
'curr_config_register': '0x2101',
'hostname': 'switchname',
'image_id': 'cat4500e-UNIVERSALK9-M',
'image_type': 'production image',
'jawa_revision': '7',
'last_reload_reason': 'Reload',
'license_level': 'entservices',
'license_type': 'Permanent',
'main_mem': '2097152',
'mem_size': {
'non-volatile configuration': '511'
},
'next_config_register': '0x2102',
'next_reload_license_level': 'entservices',
'number_of_intfs': {
'Gigabit Ethernet': '240',
'Ten Gigabit Ethernet': '4',
'Virtual Ethernet': '9'
},
'os': 'IOS-XE',
'platform': 'Catalyst 4500 L3 Switch',
'processor': {
'cpu_type': 'MPC8572',
'speed': '1.5GHz',
'supervisor': '7'
},
'processor_type': 'MPC8572',
'returned_to_rom_by': 'reload',
'rom': '15.0(1r)SG5',
'rtr_type': 'WS-C4507R+E',
'running_default_software': True,
'snowtrooper_revision': '0x0.0x1C',
'system_restarted_at': '09:57:20 GMT Tue Oct 15 2013',
'uptime': '6 years, 2 weeks, 13 hours, 31 minutes',
'uptime_this_cp': '6 years, 2 weeks, 13 hours, 33 minutes',
'version': '03.03.02.SG',
'version_short': '03.03'
}
}
golden_output_1 = {'execute.return_value': '''
Cisco IOS Software, IOS-XE Software, Catalyst 4500 L3 Switch Software (cat4500e-UNIVERSALK9-M), Version 03.04.06.SG RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2015 by Cisco Systems, Inc.
Compiled Mon 04-May-15 02:44 by prod_rel_team
Cisco IOS-XE software, Copyright (c) 2005-2010, 2012 by cisco Systems, Inc.
All rights reserved. Certain components of Cisco IOS-XE software are
licensed under the GNU General Public License ("GPL") Version 2.0. The
software code licensed under GPL Version 2.0 is free software that comes
with ABSOLUTELY NO WARRANTY. You can redistribute and/or modify such
GPL code under the terms of GPL Version 2.0. For more details, see the
documentation or "License Notice" file accompanying the IOS-XE software,
or the applicable URL provided on the flyer accompanying the IOS-XE
software.
ROM: 15.0(1r)SG10
sample_4510r_e uptime is 2 years, 11 weeks, 3 days, 3 hours, 3 minutes
Uptime for this control processor is 2 years, 11 weeks, 1 day, 22 hours, 18 minutes
System returned to ROM by SSO Switchover
System restarted at 19:11:28 GMT Tue Aug 22 2017
System image file is "bootflash:cat4500e-universalk9.SPA.03.04.06.SG.151-2.SG6.bin"
Jawa Revision 7, Snowtrooper Revision 0x0.0x1C
Last reload reason: power-on
This product contains cryptographic features and is subject to United
States and local country laws governing import, export, transfer and
use. Delivery of Cisco cryptographic products does not imply
third-party authority to import, export, distribute or use encryption.
Importers, exporters, distributors and users are responsible for
compliance with U.S. and local country laws. By using this product you
agree to comply with applicable laws and regulations. If you are unable
to comply with U.S. and local laws, return this product immediately.
A summary of U.S. laws governing Cisco cryptographic products may be found at:
http://www.cisco.com/wwl/export/crypto/tool/stqrg.html
If you require further assistance please contact us by sending email to
<EMAIL>.
License Information for 'WS-X45-SUP7-E'
License Level: entservices Type: Permanent
Next reboot license Level: entservices
cisco WS-C4510R+E (MPC8572) processor (revision 11) with 2097152K/20480K bytes of memory.
Processor board ID JAD213101PP
MPC8572 CPU at 1.5GHz, Supervisor 7
Last reset from PowerUp
8 Virtual Ethernet interfaces
384 Gigabit Ethernet interfaces
8 Ten Gigabit Ethernet interfaces
511K bytes of non-volatile configuration memory.
Configuration register is 0x2102
'''}
golden_parsed_output_1 = {
'version': {
'version_short': '03.04',
'platform': 'Catalyst 4500 L3 Switch',
'version': '03.04.06.SG',
'image_id': 'cat4500e-UNIVERSALK9-M',
'os': 'IOS-XE',
'image_type': 'production image',
'compiled_date': 'Mon 04-May-15 02:44',
'compiled_by': 'prod_rel_team',
'rom': '15.0(1r)SG10',
'hostname': 'sample_4510r_e',
'uptime': '2 years, 11 weeks, 3 days, 3 hours, 3 minutes',
'uptime_this_cp': '2 years, 11 weeks, 1 day, 22 hours, 18 minutes',
'returned_to_rom_by': 'SSO Switchover',
'system_restarted_at': '19:11:28 GMT Tue Aug 22 2017',
'system_image': 'bootflash:cat4500e-universalk9.SPA.03.04.06.SG.151-2.SG6.bin',
'jawa_revision': '7',
'snowtrooper_revision': '0x0.0x1C',
'last_reload_reason': 'PowerUp',
'license_type': 'Permanent',
'license_level': 'entservices',
'next_reload_license_level': 'entservices',
'chassis': 'WS-C4510R+E',
'main_mem': '2097152',
'processor_type': 'MPC8572',
'rtr_type': 'WS-C4510R+E',
'chassis_sn': 'JAD213101PP',
'processor': {
'cpu_type': 'MPC8572',
'speed': '1.5GHz',
'supervisor': '7'
},
'number_of_intfs': {
'Virtual Ethernet': '8',
'Gigabit Ethernet': '384',
'Ten Gigabit Ethernet': '8'
},
'mem_size': {
'non-volatile configuration': '511'
},
'curr_config_register': '0x2102'
}
}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
version_obj = ShowVersion(device=self.dev1)
with self.assertRaises(AttributeError):
parsed_output = version_obj.parse()
def test_semi_empty(self):
self.dev2 = Mock(**self.semi_empty_output)
version_obj = ShowVersion(device=self.dev2)
with self.assertRaises(KeyError):
parsed_output = version_obj.parse()
def test_golden_asr1k(self):
self.maxDiff = None
self.dev_asr1k = Mock(**self.golden_output_asr1k)
version_obj = ShowVersion(device=self.dev_asr1k)
parsed_output = version_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_asr1k)
def test_golden_c3850(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_c3850)
version_obj = ShowVersion(device=self.dev_c3850)
parsed_output = version_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_c3850)
def test_golden_isr4k(self):
self.maxDiff = None
self.dev_isr4k = Mock(**self.golden_output_isr4k)
version_obj = ShowVersion(device=self.dev_isr4k)
parsed_output = version_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_isr4k)
def test_golden_asr901(self):
self.maxDiff = None
self.dev_asr901 = Mock(**self.golden_output_asr901)
version_obj = ShowVersion(device=self.dev_asr901)
parsed_output = version_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_asr901)
def test_golden_c4507(self):
self.maxDiff = None
self.dev_c4k = Mock(**self.golden_output_c4507)
obj = ShowVersion(device=self.dev_c4k)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_c4507)
def test_golden_1(self):
self.maxDiff = None
self.dev_1 = Mock(**self.golden_output_1)
obj = ShowVersion(device=self.dev_1)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_1)
class TestDir(unittest.TestCase):
dev1 = Device(name='empty')
dev2 = Device(name='semi_empty')
dev_asr1k = | |
FilenamePrefix + "_" + str(i) + Suffix
#write to file
BasinChannelData.to_csv(OutputFilename, index=False)
def RemoveSmallSegments(BasinHillslopeData, n_traces=50):
"""
Remove hilltop segments with fewer than a specified number of traces in a basin
Args:
BasinHillslopeData (pandas dataframe): The dataframe containing the hillslope data (ridgelines). You get this using the ReadHillslopeData function
n_traces (int): the minimum number of traces
Author: FJC
"""
# remove segments shorter than the threshold length
BasinHillslopeData = BasinHillslopeData.groupby('StreamID').filter(lambda x: x.shape[0] > n_traces)
return BasinHillslopeData
#---------------------------------------------------------------------------------#
# ANALYSIS FUNCTIONS
#---------------------------------------------------------------------------------#
def DetermineSc(DataDirectory,FilenamePrefix,PlotDirectory):
"""
Determines the critical slope following Grieve et al. 2016 How Long is a Hillslope?
Args:
DataDirectory (str): the data directory
FilenamePrefix (str): the file name prefix
PlotDirectory (str): The directory into which the plots are saved
MDH
"""
# load the hillslopes data and isolate segments in basin
HillslopeData = ReadHillslopeData(DataDirectory, FilenamePrefix)
# get all the segments
Segments = HillslopeData.StreamID.unique()
Lh_segments = np.zeros(len(Segments))
R_segments = np.zeros(len(Segments))
# For each segment, collect hillslope data
# record the number of traces in each segment into a new dataframe
Data = pd.DataFrame(columns=['SegmentNo','Lh','LhLower','LhUpper','R','RLower','RUpper','NTraces'])
for i in range(0,len(Segments)):
#Get segment hillslope data
SegmentHillslopeData = HillslopeData[HillslopeData.StreamID == float(Segments[i])]
#hillslopes
Lh = SegmentHillslopeData.Lh.quantile(0.5)
LhUpper = SegmentHillslopeData.Lh.quantile(0.75)
LhLower = SegmentHillslopeData.Lh.quantile(0.25)
R = SegmentHillslopeData.R.quantile(0.5)
RLower = SegmentHillslopeData.R.quantile(0.25)
RUpper = SegmentHillslopeData.R.quantile(0.75)
NTraces = SegmentHillslopeData.size
#add to data frame
Data.loc[i] = [Segments[i],Lh,LhLower,LhUpper,R,RLower,RUpper,NTraces]
# remove rows with no data (i.e. no hillslope traces)
Data = Data.dropna()
Data = Data[Data.NTraces > 50]
# plot theoretical relationship
LH = np.arange(0.,200.,1.)
#Sc = 0.8
#pr = 2400 #kg/m3
#ps = 1400 #kg/m3
#K = 0.01 #m2/y
#E = 0.2 #m/y
#k = (pr*E)/(2.*ps*K)
#R = (Sc * (-1. + np.sqrt(1 + k**2. * LH**2.) + np.log(3.) - np.log(2. + np.sqrt(1. + k**2. * LH**2.))))/k
#R = LH*Sc
# declare colour map
#ColourMap = cm.plasma_r
#Create a figure instance for plotting Length vs Relief
CreateFigure(AspectRatio=3.)
# setup subplots
gs = gridspec.GridSpec(1, 2, width_ratios=[2.5, 1])
a0 = plt.subplot(gs[0])
# plot the raw data
a0.plot(HillslopeData.Lh,HillslopeData.R,'.',ms=2,color=[0.8,0.8,0.8])
# plot the segmented data
# Error bars with colours but faded (alpha)
for i, row in Data.iterrows():
#LhErr = np.array([[row.LhLower],[row.LhUpper]])
#RErr = np.array([[row.RLower],[row.RUpper]])
#plt.plot([row.Lh,row.Lh],RErr,'-', lw=1, color=[0.25,0.25,0.25], alpha=0.5,zorder=9)
#plt.plot(LhErr,[row.R,row.R],'-', lw=1, color=[0.25,0.25,0.25], alpha=0.5,zorder=9)
a0.plot(row.Lh,row.R,'.',ms=2,color=[0.25,0.25,0.25],zorder=32)
# set up range of S_c values to test and empty array for results
Sc_test = np.arange(0.5,1.,0.01)
Percent_Less_Than_Segs = np.zeros(len(Sc_test))
Percent_Less_Than_All = np.zeros(len(Sc_test))
NoSegments = len(Data)
NoHillslopes = len(HillslopeData)
for i in range(0,len(Sc_test)):
#print(Sc_test[i])
# get max hillslope relief
R_test = Data.Lh*Sc_test[i]
#compare to actual hillslope relief and count how many fall below max line
#first for segments
BelowMask = Data.R < R_test
NumberBelow = np.sum(BelowMask)
Percent_Less_Than_Segs[i] = 100.*float(NumberBelow)/float(NoSegments)
# get max hillslope relief
R_test = HillslopeData.Lh*Sc_test[i]
#and for all data
BelowMask = HillslopeData.R < R_test
NumberBelow = np.sum(BelowMask)
Percent_Less_Than_All[i] = 100.*float(NumberBelow)/float(NoHillslopes)
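# choose the Sc whose percentage of segments falling below the R = Lh*Sc
# line is closest to 99%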
ind = np.argmin(np.abs(99.-Percent_Less_Than_Segs))
print("Sc = "+str(Sc_test[ind]))
Sc = np.around(Sc_test[ind],decimals=2)
R = LH*Sc
a0.plot(LH,R,'--', color='r',zorder=36)
a0.text(10,50,"$S_C = $"+str(Sc),rotation=20.)
plt.xlabel("Hillslope Length (m)")
plt.ylabel("Hillslope Relief (m)")
a0.set_xlim(0, 200)
a0.set_ylim(0, 200)
a0.text(0.05*2., 0.95, "(a)", transform=a0.transAxes, va='top', ha='right')
# Add axes for plotting Sc vs % less than
a1 = plt.subplot(gs[1])
a1.plot(Sc_test,Percent_Less_Than_Segs,'k-')
a1.plot(Sc_test,Percent_Less_Than_All,'-',color=[0.5,0.5,0.5])
a1.plot([0.5,Sc_test[ind],Sc_test[ind]],[99.,99.,0.],'r--',lw=0.5)
a1.text(Sc_test[ind],85,"$S_C = $"+str(Sc),rotation=-90)
plt.xlabel("$S_C$")
plt.ylabel("Percent Lower Relief")
plt.ylim(70,100)
a1.set_xlim(0.5,1.0)
a1.yaxis.tick_right()
a1.yaxis.set_label_position("right")
a1.text(0.95, 0.95, "(b)", transform=a1.transAxes, va='top', ha='right')
plt.tight_layout()
plt.savefig(PlotDirectory+"Determine_Sc.png",dpi=300)
plt.savefig(PlotDirectory+"Determine_Sc.pdf")
def CalculateEStarRStar(DataDirectory,FilenamePrefix,Basin,Sc=0.71):
"""
Calculate E* and R* for a specific basin, allowing the critical slope (Sc)
to be varied.
Args:
DataDirectory (str): the data directory
FilenamePrefix (str): the file name prefix
Basin (int): the basin key identifying the basin to analyse
Sc (float): The critical slope to use
returns: pandas data frame with Estar Rstar data and quantiles for hillslopes
organised by channel segments for the specified basin
MDH, September 2017
"""
# load the channel data
ChannelData = ReadChannelData(DataDirectory, FilenamePrefix)
#load the hillslopes data
HillslopeData = ReadHillslopeData(DataDirectory, FilenamePrefix)
# isolate basin data
BasinChannelData = ChannelData[ChannelData.basin_key == Basin]
# segments in the hillslope data
#Segments = BasinHillslopeData.StreamID.unique()
Segments = BasinChannelData.segment_number.unique()
# For each segment get the MChi value and collect dimensionless hillslope data
# record the number of traces in each segment into a new dataframe
Data = pd.DataFrame(columns=['SegmentNo','MChi','FlowLength','SegmentLength','EStar','EStarLower','EStarUpper','RStar','RStarLower','RStarUpper','NTraces'])
# Loop through the segments
for i in range(0,len(Segments)):
#Get segment hillslope data
SegmentHillslopeData = HillslopeData[HillslopeData.StreamID == float(Segments[i])]
#Get segment channel data and calculate flow length
SegmentChannelData = BasinChannelData[BasinChannelData.segment_number == Segments[i]]
#channels
MChi = SegmentChannelData.m_chi.unique()[0]
TempFL = SegmentChannelData.flow_distance
FlowLength = np.median(TempFL)
SegmentLength = np.max(TempFL)-np.min(TempFL)
#hillslopes
TempEs = (-2.*SegmentHillslopeData.Cht*SegmentHillslopeData.Lh)/Sc
TempRs = SegmentHillslopeData.S/Sc
#get the stats to plot
EStar = TempEs.quantile(0.5)
EStarUpper = TempEs.quantile(0.75)
EStarLower = TempEs.quantile(0.25)
RStar = TempRs.quantile(0.5)
RStarUpper = TempRs.quantile(0.75)
RStarLower = TempRs.quantile(0.25)
NTraces = SegmentHillslopeData.size
#add to data frame
Data.loc[i] = [Segments[i],MChi,FlowLength,SegmentLength,EStar,EStarLower,EStarUpper,RStar,RStarLower,RStarUpper,NTraces]
# remove rows with no data (i.e. no hillslope traces)
Data = Data.dropna(axis=0, how='any')
# only keep segments with more than 50 hillslope traces
#Data = Data[Data.NTraces > 50]
return Data
def CalculateRStar(EStar):
"""
Calculates R* from E*.
returns: an array of R* values (same shape as EStar).
MDH
"""
RStar = (1./EStar)*(np.sqrt(1.+(EStar**2.)) - np.log(0.5*(1.+np.sqrt(1+EStar**2.))) - 1.)
return RStar
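# Quick sanity check on the relation above (derived from the formula here,
# not stated in the original source): R* ~ E*/4 as E* -> 0 and R* -> 1 as
# E* -> inf, e.g. CalculateRStar(0.01) ~ 0.0025, CalculateRStar(1000.) ~ 0.99.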
def PlotEStarRStarTheoretical():
"""
This makes the theoretical E* vs R* plot. It plots onto the currently open figure.
SMM Note: This would be better if it used a supplied figure. Can the default be plt.gcf()?
MDH
"""
# Calculate analytical relationship
EStar = np.logspace(-1,3,1000)
RStar = CalculateRStar(EStar)
# Plot with open figure
plt.plot(EStar,RStar,'k--')
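# Minimal usage sketch (the directory, prefix and basin key below are
# hypothetical): overlay the per-segment median E*, R* of one basin on the
# theoretical curve.
#
#   Data = CalculateEStarRStar("./data/", "my_dem_prefix", Basin=0, Sc=0.71)
#   CreateFigure()
#   PlotEStarRStarTheoretical()
#   plt.plot(Data.EStar, Data.RStar, 'k.', ms=3)
#   plt.xscale('log')
#   plt.xlabel('$E^*$')
#   plt.ylabel('$R^*$')
#   plt.savefig("EStarRStar_sketch.png", dpi=300)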
#-------------------------------------------------------------------------------#
# PLOTTING FUNCTIONS
#-------------------------------------------------------------------------------#
# SMM: Checked and working 13/06/2018
def PlotChiElevationSegments(DataDirectory, FilenamePrefix, PlotDirectory, BasinID):
"""
This plots the chi--elevation profile with the segments used in the hilltop analyses plotted in random colours.
The segments are not the same as the ones determined by the segmentation algorithm. Instead they are bits of the chi
profile split up to see the correspondence between channel and hillslope data.
Args:
DataDirectory (str): the data directory
FilenamePrefix (str): the file name prefix
PlotDirectory (str): The directory into which the plots are saved
BasinID (int): The basin to be plotted
Author: MDH
"""
# load the channel data
ChannelData = ReadChannelData(DataDirectory, FilenamePrefix)
# isolate basin data
BasinChannelData = ChannelData[ChannelData.basin_key == BasinID]
MinimumChi = BasinChannelData.chi.min()
# how many segments are we dealing with?
Segments = BasinChannelData.segment_number.unique()
# setup the figure
Fig = CreateFigure()
# Get the data columns for plotting
for i in range(0, len(Segments)):
#get data arrays
Chi = ChannelData.chi[ChannelData.segment_number == Segments[i]]
Elevation = ChannelData.elevation[ChannelData.segment_number == Segments[i]]
SegmentedElevation = ChannelData.segmented_elevation[ChannelData.segment_number == Segments[i]]
#normalise chi by outlet chi
Chi = Chi-MinimumChi
#plot, colouring segments
Colour = np.random.rand()
plt.plot(Chi,Elevation,'k--',dashes=(2,2), lw=0.5,zorder=10)
plt.plot(Chi, SegmentedElevation, '-', lw=2, c=plt.cm.Paired(Colour),zorder=9)
# Finalise the figure
plt.xlabel(r'$\chi$ (m)')
plt.ylabel('Elevation (m)')
plt.title('Basin ID ' + str(BasinID))
plt.tight_layout()
plt.savefig(PlotDirectory+FilenamePrefix + "_" + str(BasinID) + "_ChiElevSeg.png", dpi=300)
plt.close(Fig)
# SMM: Checked and working 13/06/2018
def PlotLongProfileSegments(DataDirectory, FilenamePrefix, PlotDirectory, BasinID):
"""
This plots the channel long profile (flow distance vs. elevation) with the segments used in the hilltop analyses plotted in random colours.
The segments are not the same as the ones determined by the segmentation algorithm. Instead they are bits of the channel
profile split up to see the correspondence between channel and hillslope data.
Args:
DataDirectory (str): the data directory
FilenamePrefix (str): the file name prefix
PlotDirectory (str): The directory into which the plots are saved
BasinID (int): The basin to be plotted
Author: MDH
"""
# load the channel data
ChannelData = ReadChannelData(DataDirectory, FilenamePrefix)
# isolate basin data
BasinChannelData = ChannelData[ChannelData.basin_key == BasinID]
MinimumDistance = BasinChannelData.flow_distance.min()
# how many segments are we dealing with?
Segments = BasinChannelData.segment_number.unique()
# setup the figure
Fig = CreateFigure()
# Get the data columns for plotting
for i in range(0, len(Segments)):
#get data arrays
Dist = ChannelData.flow_distance[ChannelData.segment_number == Segments[i]]
Elevation = ChannelData.elevation[ChannelData.segment_number == Segments[i]]
SegmentedElevation = ChannelData.segmented_elevation[ChannelData.segment_number == Segments[i]]
#normalise distance by outlet distance
Dist = Dist-MinimumDistance
#plot, colouring segments
Colour = np.random.rand()
plt.plot(Dist/1000,Elevation,'k--',dashes=(2,2), lw=0.5,zorder=10)
plt.plot(Dist/1000, SegmentedElevation, '-', lw=2, c=plt.cm.Paired(Colour),zorder=9)
# Finalise the figure
plt.xlabel('Distance (km)')
plt.ylabel('Elevation (m)')
plt.title('Basin ID ' + str(BasinID))
plt.tight_layout()
plt.savefig(PlotDirectory+FilenamePrefix + "_" + str(BasinID) + "_LongProfSeg.png", dpi=300)
plt.close(Fig)
# SMM: Checked and working 13/06/2018
def PlotChiElevationMChi(DataDirectory, FilenamePrefix, PlotDirectory, BasinID):
"""
This function reads the channel data file and plots the chi-elevation profile along with the segments extracted from the segmentation algorithm.
It also colours the plot with the M_chi value | |
distribution function.
Notes
-----
This is needed because the azimuthal bin locations in the
burst data FPI distribution functions are time dependent. By
extracting single distributions, the phi, theta, and energy
variables become time-independent and easier to work with.
Calculation of velocity and kinetic entropy can be found in
Liang, et al, PoP (2019) `[1]`_. The implementation here takes into
account the fact that the FPI energy bins are not equally spaced.
.. [1]: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., … <NAME>. (2019). Decomposition of
plasma kinetic entropy into position and velocity space and the
use of kinetic entropy in particle-in-cell simulations. Physics
of Plasmas, 26(8), 82903. https://doi.org/10.1063/1.5098888
Parameters
----------
f : `xarray.DataArray`
A preconditioned 3D distribution function
mass : float
Mass (kg) of the particle species represented in the distribution.
E0 : float
Energy (eV) used to normalize the energy bins to [0, inf)
Returns
-------
S : `xarray.DataArray`
Velocity space entropy
'''
kB = constants.k # J/K
eV2J = constants.eV
eV2K = constants.value('electron volt-kelvin relationship')
# Assume that the azimuth and polar angle bins are equal size
dtheta = f['theta'].diff(dim='theta').mean().item()
dphi = f['phi'].diff(dim='phi_index').mean().item()
# Calculate the factors associated with the normalized
# volume element
# - U ranges from [0, inf] and np.inf/np.inf = nan
# - Set the last element of y along U manually to 0
# - log(0) = -inf; Zeros come from theta and y. Reset to zero
# - Photo-electron correction can result in negative phase space
# density. log(-1) = nan
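# - The (1 - U) powers in y are consistent with a substitution of the form
#   U = E/(E + E0), i.e. E = E0*U/(1 - U) and dE = E0*dU/(1 - U)**2; this is
#   an inference from the integrands here and in the moment functions below,
#   not a statement taken from the original source.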
with np.errstate(invalid='ignore', divide='ignore'):
y = (np.sqrt(f['U']) / (1 - f['U'])**(5/2))
lnydy = np.log(y * np.sin(f['theta']) * dtheta * dphi)
y = y.where(np.isfinite(y), 0)
lnydy = lnydy.where(np.isfinite(lnydy), 0)
# Numerator
coeff = 1e-6/(3*N) * (2*eV2J*E0/mass)**(3/2) # m^6/s^3
num1 = (y * lnydy * (f_M - f)).integrate('phi')
num1 = (np.sin(f['theta']) * num1).integrate('theta')
num1 = 1e12 * coeff * num1.integrate('U')
num2 = (y * (f_M - f)).integrate('phi')
num2 = (np.sin(f['theta']) * num2).integrate('theta')
num2 = 1e12 * coeff * Distribution_Function._trapz(num2, num2['U'])
numerator = num1 + num2
# Denominator
d1 = 1
d2 = np.log(2**(2/3) * np.pi * kB * eV2K * T / (eV2J * E0))
d3 = (y * lnydy * f_M).integrate('phi')
d3 = (np.sin(f['theta']) * d3).integrate('theta')
d3 = 1e12 * coeff * d3.integrate('U')
d4 = (y * f_M).integrate('phi')
d4 = (np.sin(f['theta']) * d4).integrate('theta')
d4 = 1e12 * coeff * Distribution_Function._trapz(d4, d4['U'])
denominator = d1 + d2 - d3 - d4
return numerator, denominator
def pressure_3D(N, T):
'''
Calculate the pressure tensor from the number density and temperature
tensor of a single 3D velocity space distribution function, using the
ideal-gas relation P = n * kB * T.
Parameters
----------
N : `xarray.DataArray`
Number density.
T : `xarray.DataArray`
Temperature tensor`.
Returns
-------
P : `xarray.DataArray`
Pressure tensor
'''
kB = constants.k
eV2K = constants.value('electron volt-kelvin relationship')
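# The 1e15 factor appears to convert a density in cm^-3 (1e6 per m^-3) and
# a pressure in Pa to nPa (1e9) -- an inference from the constants used,
# not a statement from the original source.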
P = 1e15 * N * kB * eV2K * T
return P
def temperature_3D(f, mass, E0, N, V):
'''
Calculate the temperature tensor from a single 3D velocity space
distribution function.
Notes
-----
This is needed because the azimuthal bin locations in the
burst data FPI distribution functions are time dependent. By
extracting single distributions, the phi, theta, and energy
variables become time-independent and easier to work with.
Parameters
----------
f : `xarray.DataArray`
A preconditioned 3D distribution function
mass : float
Mass (kg) of the particle species represented in the distribution.
E0 : float
Energy used to normalize the energy bins to [0, inf)
N : `xarray.DataArray`
Number density computed from `f`.
V : `xarray.DataArray`
Bulk velocity computed from `f`.
Returns
-------
T : `xarray.DataArray`
Temperature tensor
'''
K2eV = constants.value('kelvin-electron volt relationship')
eV2J = constants.eV
kB = constants.k # J/k
# Integrate over phi
Txx = (np.cos(f['phi'])**2 * f).integrate('phi')
Tyy = (np.sin(f['phi'])**2 * f).integrate('phi')
Tzz = f.integrate('phi')
Txy = (np.cos(f['phi']) * np.sin(f['phi']) * f).integrate('phi')
Txz = (np.cos(f['phi']) * f).integrate('phi')
Tyz = (np.sin(f['phi']) * f).integrate('phi')
# Integrate over theta
Txx = (np.sin(Txx['theta'])**3 * Txx).integrate('theta')
Tyy = (np.sin(Tyy['theta'])**3 * Tyy).integrate('theta')
Tzz = (np.cos(Tzz['theta'])**2 * np.sin(Tzz['theta']) * Tzz).integrate('theta')
Txy = (np.sin(Txy['theta'])**3 * Txy).integrate('theta')
Txz = (np.cos(Txz['theta']) * np.sin(Txz['theta'])**2 * Txz).integrate('theta')
Tyz = (np.cos(Tyz['theta']) * np.sin(Tyz['theta'])**2 * Tyz).integrate('theta')
# Combine into tensor
T = xr.concat([xr.concat([Txx, Txy, Txz], dim='t_index_dim1'),
xr.concat([Txy, Tyy, Tyz], dim='t_index_dim1'),
xr.concat([Txz, Tyz, Tzz], dim='t_index_dim1'),
], dim='t_index_dim2'
)
T = T.assign_coords(t_index_dim1=['x', 'y', 'z'],
t_index_dim2=['x', 'y', 'z'])
# Integrate over energy
with np.errstate(divide='ignore', invalid='ignore'):
y = T['U']**(3/2) / (1 - T['U'])**(7/2)
y = y.where(np.isfinite(y), 0)
coeff = 1e6 * (2/mass)**(3/2) / (N * kB / K2eV) * (E0*eV2J)**(5/2)
Vij = xr.concat([xr.concat([V[0]*V[0],
V[0]*V[1],
V[0]*V[2]], dim='t_index_dim1'),
xr.concat([V[1]*V[0],
V[1]*V[1],
V[1]*V[2]], dim='t_index_dim1'),
xr.concat([V[2]*V[0],
V[2]*V[1],
V[2]*V[2]], dim='t_index_dim1')
], dim='t_index_dim2'
)
Vij = Vij.drop('velocity_index')
T = coeff * (y * T).integrate('U') - (1e6 * mass / kB * K2eV * Vij)
return T
def velocity_3D(f, mass, E0, N):
'''
Calculate the bulk velocity from a single 3D velocity space
distribution function.
Notes
-----
This is needed because the azimuthal bin locations in the
burst data FPI distribution functions are time dependent. By
extracting single distributions, the phi, theta, and energy
variables become time-independent and easier to work with.
Parameters
----------
f : `xarray.DataArray`
A preconditioned 3D distribution function
mass : float
Mass (kg) of the particle species represented in the distribution.
E0 : float
Energy used to normalize the energy bins to [0, inf)
N : `xarray.DataArray`
Number density computed from `f`.
Returns
-------
V : `xarray.DataArray`
Bulk velocity
'''
eV2J = constants.eV
# Integrate over phi
vx = (np.cos(f['phi']) * f).integrate('phi')
vy = (np.sin(f['phi']) * f).integrate('phi')
vz = f.integrate('phi')
# Integrate over theta
vx = (np.sin(vx['theta'])**2 * vx).integrate('theta')
vy = (np.sin(vy['theta'])**2 * vy).integrate('theta')
vz = (np.cos(vz['theta']) * np.sin(vz['theta']) * vz).integrate('theta')
V = xr.concat([vx, vy, vz], dim='velocity_index')
V = V.assign_coords({'velocity_index': ['Vx', 'Vy', 'Vz']})
# Integrate over Energy
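# As in the other moments, the U/(1 - U)**3 weight below is consistent with
# substituting U = E/(E + E0) into the E*dE part of the first velocity
# moment (see the note in the entropy routine above; an inference, not a
# statement from the original source).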
with np.errstate(divide='ignore', invalid='ignore'):
y = V['U'] / (1 - V['U'])**3
y = y.where(np.isfinite(y), 0)
coeff = -1e3 * 2 * (eV2J * E0 / mass)**2 / N
V = coeff * (y * V).integrate('U')
return V
def vspace_entropy_3D(f, mass, E0, N, s):
'''
Calculate velocity space entropy from a single 3D velocity space
distribution function.
Notes
-----
This is needed because the azimuthal bin locations in the
burst data FPI distribution functions are time dependent. By
extracting single distributions, the phi, theta, and energy
variables become time-independent and easier to work with.
Calculation of velocity and kinetic entropy can be found in
Liang, et al, PoP (2019) `[1]`_. The implementation here takes into
account the fact that the FPI energy bins are not equally spaced.
.. [1]: <NAME>., <NAME>., <NAME>., <NAME>., Drake,
<NAME>., <NAME>., … <NAME>. (2019). Decomposition of
plasma kinetic entropy into position and velocity space and the
use of kinetic entropy in particle-in-cell simulations. Physics
of Plasmas, 26(8), 82903. https://doi.org/10.1063/1.5098888
Parameters
----------
f : `xarray.DataArray`
A preconditioned 3D distribution function
mass : float
Mass (kg) of the particle species represented in the distribution.
E0 : float
Energy (eV) used to normalize the energy bins to [0, inf)
Returns
-------
S : `xarray.DataArray`
Velocity space entropy
'''
kB = constants.k # J/K
eV2J = constants.eV
# Assume that the azimuth and polar angle bins are equal size
dtheta = f['theta'].diff(dim='theta').mean().item()
dphi = f['phi'].diff(dim='phi_index').mean().item()
# Calculate the factors associated with the normalized
# volume element
# | |
#import necessary libraries
import numpy as np
from arqtic.program import Program, Gate
from arqtic.qite import make_QITE_program
from arqtic.arqtic_for_ibm import ibm_circ_to_program, get_ibm_circuit
from arqtic.ds_compiler import get_constant_depth_program
from arqtic.real_time import heisenberg_evolution_program
from arqtic.observables import *
import os
#Create data directory
current=os.getcwd()
newdir="data"
path = os.path.join(current, newdir)
if not os.path.isdir(path):
os.makedirs(path)
class Simulation_Generator:
def __init__(self,file="input_file.txt",log="logfile.txt"):
#%matplotlib inline
input_file=open(file,'r')
data=input_file.readlines()
completename = os.path.join(path,log)
self.namevar=str(completename)
with open(self.namevar,'w') as tempfile:
tempfile.write("***ArQTiC Session Log File***\n\n")
#Default Parameters
self.H_BAR = 1
self.Jx=self.Jy=self.Jz=[]
self.hx=self.hy=self.hz=[]
self.td_Jx_func=self.td_Jy_func=self.td_Jz_func=[]
self.td_hx_func=self.td_hy_func=self.td_hz_func=[]
self.num_spins=2
self.initial_spins=[]
self.delta_t=1
self.steps=1
self.real_time="True"
self.QCQS="QS"
self.shots=1024
self.noise_choice="False"
self.device="ibmq_qasm_simulator"
self.plot_flag="True"
self.freq=0
self.time_dep_flag="False"
self.custom_time_dep="False"
self.programs_list=[]
self.backend="ibm"
self.ibm_circuits_list=[]
self.rigetti_circuits_list=[]
self.cirq_circuits_list=[]
self.qite_energies = []
self.compile="False"
self.compiler="native"
self.constant_depth="False"
self.observable="system_magnetization"
self.measure_dir="z"
self.time_func=np.cos
for i in range(len(data)-1):
value=data[i+1].strip()
if "*Jx" in data[i]:
self.Jx=value.split(' ')
elif "*Jy" in data[i]:
self.Jy=value.split(' ')
elif "*Jz" in data[i]:
self.Jz=value.split(' ')
elif "*hx" in data[i]:
self.hx=value.split(' ')
elif "*hy" in data[i]:
self.hy=value.split(' ')
elif "*hz" in data[i]:
self.hz=value.split(' ')
elif "*td_Jx_func" in data[i]:
self.td_Jx_func=value.split(' ')
elif "*td_Jy_func" in data[i]:
self.td_Jy_func=value.split(' ')
elif "*td_Jz_func" in data[i]:
self.td_Jz_func=value.split(' ')
elif "*td_hx_func" in data[i]:
self.td_hx_func=value.split(' ')
elif "*td_hy_func" in data[i]:
self.td_hy_func=value.split(' ')
elif "*td_hz_func" in data[i]:
self.td_hz_func=value.split(' ')
elif "*hbar" in data[i]:
if (value == "eVfs"):
self.H_BAR = 0.658212 # eV*fs
elif "*initial_spins" in data[i]:
self.initial_spins=value.split(' ')
elif "*delta_t" in data[i]:
self.delta_t=float(value)
elif "*steps" in data[i]:
self.steps=int(value)
elif "*real_time" in data[i]:
self.real_time=value
elif "*observable" in data[i]:
self.observable=value
elif "*beta" in data[i]:
self.beta=float(value)
elif "*delta_beta" in data[i]:
self.delta_beta=float(value)
elif "*domain" in data[i]:
self.domain=int(value)
elif "*num_spins" in data[i]:
self.num_spins=int(value)
elif "*QCQS" in data[i]:
self.QCQS=value
elif "*device" in data[i]:
self.device=value
elif "*backend" in data[i]:
self.backend=value
elif "*measure_dir" in data[i]:
self.measure_dir=value
elif "*noise_choice" in data[i]:
self.noise_choice=value
elif "*plot_flag" in data[i]:
self.plot_flag=value
elif "*shots" in data[i]:
self.shots=int(value)
elif "*freq" in data[i]:
self.freq=float(value)
elif "*time_dep_flag" in data[i]:
self.time_dep_flag=value
elif "*compiler" in data[i]:
self.compiler=value
elif "*compile" in data[i]:
self.compile=value
elif "*constant_depth" in data[i]:
self.constant_depth=value
elif "*custom_time_dep" in data[i]:
self.custom_time_dep=value
if self.custom_time_dep in "True":
from time_dependence import external_func
print("Found an external time dependence function")
with open(self.namevar,'a') as tempfile:
tempfile.write("Found an external time dependence function\n")
self.time_func=external_func
#format array entries
#initial spin state
if (self.initial_spins == []):
self.initial_spins = np.zeros(self.num_spins)
else:
self.initial_spins = np.asarray(self.initial_spins)
#coupling parameters
if (len(self.Jx) > 0):
if (len(self.Jx) == 1):
self.Jx = np.full(self.num_spins-1, float(self.Jx[0]))
elif (self.Jx[0] == "random"):
lower = float(self.Jx[1])
upper = float(self.Jx[2])
self.Jx = np.random.uniform(lower,upper,self.num_spins-1)
else:
self.Jx = np.asarray([float(x) for x in self.Jx])
if (len(self.Jy) > 0):
if (len(self.Jy) == 1):
self.Jy = np.full(self.num_spins-1, float(self.Jy[0]))
elif (self.Jy[0] == "random"):
lower = float(self.Jy[1])
upper = float(self.Jy[2])
self.Jy = np.random.uniform(lower,upper,self.num_spins-1)
else:
self.Jy = np.asarray([float(x) for x in self.Jy])
if (len(self.Jz) > 0):
if (len(self.Jz) == 1):
self.Jz = np.full(self.num_spins-1, float(self.Jz[0]))
elif (self.Jz[0] == "random"):
lower = float(self.Jz[1])
upper = float(self.Jz[2])
self.Jz = np.random.uniform(lower,upper,self.num_spins-1)
else:
self.Jz = np.asarray([float(x) for x in self.Jz])
#external magnetic field
if (len(self.hx) > 0):
if (len(self.hx) == 1):
self.hx = np.full(self.num_spins, float(self.hx[0]))
elif (self.hx[0] == "random"):
lower = float(self.hx[1])
upper = float(self.hx[2])
self.hx = np.random.uniform(lower,upper,self.num_spins)
else:
self.hx = np.asarray([float(x) for x in self.hx])
if (len(self.hy) > 0):
if (len(self.hy) == 1):
self.hy = np.full(self.num_spins, float(self.hy[0]))
elif (self.hy[0] == "random"):
lower = float(self.hy[1])
upper = float(self.hy[2])
self.hy = np.random.uniform(lower,upper,self.num_spins)
else:
self.hy = np.asarray([float(x) for x in self.hy])
if (len(self.hz) > 0):
if (len(self.hz) == 1):
self.hz = np.full(self.num_spins, float(self.hz[0]))
elif (self.hz[0] == "random"):
lower = float(self.hz[1])
upper = float(self.hz[2])
self.hz = np.random.uniform(lower,upper,self.num_spins)
else:
self.hz = np.asarray([float(x) for x in self.hz])
#time dependence
if (self.time_dep_flag == "True"):
if(len(self.td_Jx_func) > 0):
func = []
func.append(self.td_Jx_func[0]) #time-dependent function name
for p in range(len(self.td_Jx_func) - 1):
func.append(float(self.td_Jx_func[1+p]))
self.td_Jx_func = func
if(len(self.td_Jy_func) > 0):
func = []
func.append(self.td_Jy_func[0]) #time-dependent function name
for p in range(len(self.td_Jy_func) - 1):
func.append(float(self.td_Jy_func[1+p]))
self.td_Jy_func = func
if(len(self.td_Jz_func) > 0):
func = []
func.append(self.td_Jz_func[0]) #time-dependent function name
for p in range(len(self.td_Jz_func) - 1):
func.append(float(self.td_Jz_func[1+p]))
self.td_Jz_func = func
if(len(self.td_hx_func) > 0):
func = []
func.append(self.td_hx_func[0]) #time-dependent function name
for p in range(len(self.td_hx_func) - 1):
func.append(float(self.td_hx_func[1+p]))
self.td_hx_func = func
if(len(self.td_hy_func) > 0):
func = []
func.append(self.td_hy_func[0]) #time-dependent function name
for p in range(len(self.td_hy_func) - 1):
func.append(float(self.td_hy_func[1+p]))
self.td_hy_func = func
if(len(self.td_hz_func) > 0):
func = []
func.append(self.td_hz_func[0]) #time-dependent function name
for p in range(len(self.td_hz_func) - 1):
func.append(float(self.td_hz_func[1+p]))
self.td_hz_func = func
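# Example of the plain-text input format parsed above (values are purely
# illustrative, not defaults of the package): each "*keyword" line is
# followed by its value(s) on the next line, e.g.
#
#   *num_spins
#   4
#   *Jz
#   1.0
#   *hx
#   random 0.5 1.5
#   *delta_t
#   0.1
#   *steps
#   20
#   *real_time
#   True
#   *backend
#   ibm
#   *device
#   ibmq_qasm_simulator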
def generate_programs(self):
programs = []
#create programs for real_time evolution
if (self.real_time == "True"):
#initial spin preparation program
init_prog = Program(self.num_spins)
index=0
for q in self.initial_spins:
if int(q)==1:
init_prog.add_gate(Gate([index], 'X'))
index+=1
else:
index+=1
#measurement preparation program
meas_prog = Program(self.num_spins)
if "x" in self.measure_dir:
for q in range(self.num_spins):
meas_prog.add_gate(Gate([q],'H',))
elif "y" in self.measure_dir:
for q in range(self.num_spins):
meas_prog.add_gate(Gate([q],'RX',angles=[-np.pi/2]))
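# (These single-qubit rotations map the requested measurement axis onto the
# computational (Z) basis before the terminal Z-basis measurement; the exact
# sign convention depends on how arqtic defines the 'RX' gate.)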
#total program
for j in range(0, self.steps+1):
print("Generating timestep {} program".format(j))
with open(self.namevar,'a') as tempfile:
tempfile.write("Generating timestep {} program\n".format(j))
evolution_time = self.delta_t * j
evol_prog = heisenberg_evolution_program(self, evolution_time)
if (self.constant_depth == "True" and j>0):
evol_prog = get_constant_depth_program(evol_prog, self.num_spins)
total_prog = Program(self.num_spins)
total_prog.append_program(init_prog)
total_prog.append_program(evol_prog)
total_prog.append_program(meas_prog)
programs.append(total_prog)
self.programs_list=programs
#create programs for imaginary time evolution
else:
qite_program, energies = make_QITE_program(self)
programs.append(qite_program)
self.qite_energies = energies
self.programs_list=programs
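# Typical driver sketch (file names are hypothetical): construct the
# generator from an input file, then build backend-specific circuits.
#
#   sim = Simulation_Generator(file="my_input.txt", log="my_log.txt")
#   sim.generate_circuits()           # calls generate_programs() internally
#   circuits = sim.ibm_circuits_list  # populated when *backend is "ibm"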
def generate_circuits(self):
self.generate_programs()
#convert to backend specific circuits if one is requested
if self.backend in "ibm":
self.generate_ibm()
if self.backend in "rigetti":
self.generate_rigetti()
if self.backend in "cirq":
self.generate_cirq()
def generate_ibm(self):
#generate IBM-specific circuits
#IBM imports
import qiskit as qk
from qiskit.tools.monitor import job_monitor
from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout
from qiskit import Aer, IBMQ, execute
from qiskit.providers.aer import noise
from qiskit.providers.aer.noise import NoiseModel
from qiskit.circuit import quantumcircuit
from qiskit.circuit import Instruction
print("Creating IBM quantum circuit objects...")
with open(self.namevar,'a') as tempfile:
tempfile.write("Creating IBM quantum circuit objects...\n")
name=0
self.q_regs = qk.QuantumRegister(self.num_spins, 'q')
self.c_regs = qk.ClassicalRegister(self.num_spins, 'c')
backend = self.device
self.ibm_circuits_list=[]
for program in self.programs_list:
ibm_circ = get_ibm_circuit(backend, program,self.q_regs,self.c_regs,self.device)
self.ibm_circuits_list.append(ibm_circ)
print("IBM quantum circuit objects created")
with open(self.namevar,'a') as tempfile:
tempfile.write("IBM quantum circuit objects created\n")
if (self.compile == "True"):
provider = qk.IBMQ.get_provider(group='open')
#print(provider.backends())
device = provider.get_backend(self.device)
if (self.compiler == "native"):
print("Transpiling circuits...")
with open(self.namevar,'a') as tempfile:
tempfile.write("Transpiling circuits...\n")
temp=qk.compiler.transpile(self.ibm_circuits_list,backend=device,optimization_level=2)
self.ibm_circuits_list=temp
print("Circuits transpiled successfully")
with open(self.namevar,'a') as tempfile:
tempfile.write("Circuits transpiled successfully\n")
elif (self.compiler == "tket"):
from pytket.qiskit import qiskit_to_tk
from pytket.backends.ibm import IBMQBackend, IBMQEmulatorBackend, AerBackend
from pytket.qasm import circuit_to_qasm_str
if self.device == "":
tket_backend = AerBackend()
else:
if (self.QCQS == "QC"):
tket_backend = IBMQBackend(self.device)
else:
tket_backend = IBMQEmulatorBackend(self.device)
print("Compiling circuits...")
with open(self.namevar,'a') as tempfile:
tempfile.write("Compiling circuits...\n")
circs = []
for circuit in self.ibm_circuits_list:
tket_circ = qiskit_to_tk(circuit)
tket_backend.compile_circuit(tket_circ)
qasm_str = circuit_to_qasm_str(tket_circ)
ibm_circ = qk.QuantumCircuit.from_qasm_str(qasm_str)
circs.append(ibm_circ)
self.ibm_circuits_list=circs
print("Circuits compiled successfully")
with open(self.namevar,'a') as tempfile:
tempfile.write("Circuits compiled successfully\n")
def generate_rigetti(self):
#Rigetti imports
import pyquil
from pyquil.quil import Program
from pyquil.gates import H, RX, RZ, CZ, RESET, MEASURE
from pyquil.api import get_qc
self.rigetti_circuits_list=[]
print("Creating Pyquil program list...")
with open(self.namevar,'a') as tempfile:
tempfile.write("Creating Pyquil program list...\n")
for circuit in self.programs_list:
p = pyquil.Program(RESET()) #compressed program
ro = p.declare('ro', memory_type='BIT', memory_size=self.num_spins)
for gate in circuit.gates:
if gate.name != "":
if gate.name in "X":
p.inst(pyquil.gates.X(gate.qubits[0]))
elif gate.name in "Y":
p.inst(pyquil.gates.Y(gate.qubits[0]))
elif gate.name in "Z":
p.inst(pyquil.gates.Z(gate.qubits[0]))
elif gate.name in "H":
p.inst(pyquil.gates.H(gate.qubits[0]))
elif gate.name in "RZ":
p.inst(pyquil.gates.RZ(gate.angles[0],gate.qubits[0]))
elif gate.name in "RX":
p.inst(pyquil.gates.RX(gate.angles[0],gate.qubits[0]))
elif gate.name in "CNOT":
p.inst(pyquil.gates.CNOT(gate.qubits[0],gate.qubits[1]))
else:
print("Unrecognized gate: {}".format(gate.name))
for i in range(self.num_spins):
p.inst(pyquil.gates.MEASURE(i,ro[i]))
p.wrap_in_numshots_loop(self.shots)
self.rigetti_circuits_list.append(p)
if "True" in self.compile:
if self.QCQS in ["QS"]:
qc=get_qc(self.device, as_qvm=True)
else:
qc=get_qc(self.device)
qc.compiler.set_timeout(100)
if self.compiler in "native":
temp=[]
print("Transpiling circuits...")
with open(self.namevar,'a') as tempfile:
tempfile.write("Transpiling circuits...\n")
for circuit in self.rigetti_circuits_list:
circ = qc.compile(circuit)
temp.append(circ)
self.rigetti_circuits_list=temp
print("Circuits transpiled successfully")
with open(self.namevar,'a') as tempfile:
tempfile.write("Circuits transpiled successfully\n")
elif self.compiler in "tket":
temp=[]
from pytket.pyquil import pyquil_to_tk
from pytket.backends.forest import ForestBackend
if self.device == "":
qvm = '{}q-qvm'.format(self.num_spins)
tket_backend = ForestBackend(qvm, simulator=True)
else:
if self.QCQS in ["QC"]:
tket_backend = ForestBackend(self.device)
else:
tket_backend = ForestBackend(self.device, simulator=True)
print("Compiling circuits...")
with open(self.namevar,'a') as tempfile:
tempfile.write("Compiling circuits...\n")
for circuit in self.rigetti_circuits_list:
tket_circ = pyquil_to_tk(circuit)
tket_backend.compile_circuit(tket_circ)
temp.append(tket_circ)
self.rigetti_circuits_list=temp
print("Circuits compiled successfully")
with open(self.namevar,'a') as tempfile:
tempfile.write("Circuits compiled successfully\n")
print("Pyquil program list created successfully")
with open(self.namevar,'a') as tempfile:
tempfile.write("Pyquil program list created successfully\n")
def generate_cirq(self):
self.cirq_circuits_list=[]
#Cirq imports
import | |
if not isinstance(model_desc, MDescriptor):
raise TypeError("Invalid model descriptor")
if not model_desc.validate():
raise ValueError("Model definition not successfully validated")
if model_desc.name not in self._mReg:
self._mReg.add(model_desc)
else:
raise KeyError('Model with this name already exists in registry')
def remove(self, name):
if name in self._mReg:
del(self._mReg[name])
else:
raise KeyError('Model with this name does not exist in registry')
__delitem__ = remove
def open_trans(self, name):
"""Open a model transformation transaction"""
if self.trans is None:
self.trans = ModelTransform(name, self.__getitem__(name))
return self._mReg.descs[name]
else:
raise AssertionError("A transaction is already open")
def rollback_trans(self):
if self.trans is None:
raise AssertionError("No transaction open")
else:
self.trans = None
def commit_trans(self, new_name, description=''):
if self.trans is None:
raise AssertionError("No transaction open")
else:
self.add(self.trans.commit(new_name))
self.trans = None
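# Sketch of the transaction workflow on this registry (the instance name
# `reg`, the model names and the removed object are all hypothetical):
#
#   gd = reg.open_trans('my_model')         # returns the stored descriptor
#   reg.trans.remove(some_generator_desc)   # edit via the open ModelTransform
#   reg.commit_trans('my_model_v2')         # validate and re-add under a new name
#   # ...or reg.rollback_trans() to discard the pending changes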
def build(self, name, icvalues=None, parvalues=None,
inputs=None, tdata=None):
try:
mdesc = copy.deepcopy(self._mReg[name])
except KeyError:
raise KeyError("No such model description")
for gd in mdesc.generatorspecs.values():
gd.modelspec.flattenSpec(ignoreInputs=True, force=True)
filt_keys = ('userevents', 'userfns', 'unravelInfo',
'inputs', 'checklevel', 'activateAllBounds',
'generatorspecs', 'indepvar',
'parvalues', 'icvalues', 'reuseTerms',
'withJac', 'withJacP', 'tdata',
'abseps', 'eventtol', 'eventPars',
'withStdEvts', 'stdEvtArgs')
if icvalues is not None:
mdesc.icvalues.update(icvalues)
if parvalues is not None:
mdesc.parvalues.update(parvalues)
if inputs is not None:
mdesc.inputs.update(inputs)
if tdata is not None:
mdesc.tdata = tdata
if not mdesc.isinstantiable(True):
raise ValueError("Model description incomplete: not instantiable")
## would like ModelConstructor to be able to deal with the remaining
# keys of mdesc so that all the information in mdesc gets passed into
# the _mspec attribute of the instantiated model, otherwise mdesc needs
# to be stored somewhere else.
mc = ModelConstructor(mdesc.name,
**common.filteredDict(dict(mdesc), filt_keys))
assert len(mdesc.generatorspecs) > 0, "No Generator descriptions found"
for gdname, gd in mdesc.generatorspecs.items():
if gd.userEvents is not None:
mc.addEvents(gdname, gd.userEvents)
if gd.userFunctions is not None:
mc.addFunctions(gdname, gd.userFunctions)
if gd.userEventMaps is not None:
for em in gd.userEventMaps:
try:
# in case evmap included
evname, target, evmap = em
except ValueError:
# otherwise expect just these
evname, target = em
evmap = None
mc.mapEvent(gdname, evname, target, evmap)
model = mc.getModel()
self._mReg[name] = model
# shortcut
self.instances = self._mReg.instances
def _infostr(self, verbose=1):
if verbose == 0:
outputStr = 'Project: '+self.proj_name
elif verbose > 0:
outputStr = 'Project: '+self.proj_name
if len(self._mReg):
for m in self._mReg:
outputStr += "\n" + m._infostr(verbose-1)
else:
outputStr += 'No models in Project '+self.proj_name
return outputStr
def __repr__(self):
return self._infostr(verbose=0)
__str__ = __repr__
def info(self, verboselevel=1):
print(self._infostr(verboselevel))
# -----------------------------------------------------------------------------
class ModelTransform(object):
"""Model Transformer class.
"""
def __init__(self, name, model):
if not isinstance(model, MDescriptor):
raise TypeError("ModelTransform must be initialized with a "
"MDescriptor object")
self.orig_model_name = name
self.orig_model = model
self.trans_model = copy.deepcopy(model)
self.changelog = []
self.gentrans = None # transactions for any GenTransforms
def remove(self, obj):
"Remove hybrid model generator"
self.trans_model.remove(obj)
self.changelog.append(common.args(action='remove',
target=obj.modelspec.name))
def add(self, obj):
"Add hybrid model generator"
self.trans_model.add(obj)
self.changelog.append(common.args(action='add',
target=obj.modelspec.name))
def open_gentrans(self, name):
"""Open a generator transformation transaction"""
if self.gentrans is None:
if name in self.trans_model.generatorspecs:
self.gentrans = GenTransform(name,
self.trans_model.generatorspecs[name],
self.trans_model.icvalues,
self.trans_model.parvalues,
self.trans_model.inputs)
else:
raise KeyError('Generator %s does not exist in registry'%name)
return self.trans_model.generatorspecs[name]
else:
raise AssertionError("A transaction is already open")
def rollback_gentrans(self):
if self.gentrans is None:
raise AssertionError("No transaction open")
else:
self.gentrans = None
def commit_gentrans(self, new_name, description=''):
if self.gentrans is None:
raise AssertionError("No transaction open")
else:
self.add(self.gentrans.commit(new_name))
del self.trans_model.generatorspecs[self.gentrans.orig_gen_name]
# update these if they were changed by gen transformation
self.trans_model.icvalues = self.gentrans.model_icvalues
self.trans_model.parvalues = self.gentrans.model_parvalues
self.trans_model.inputs = self.gentrans.model_inputs
self.gentrans = None
def unresolved(self):
"""Returns the unresolved inconsistencies in model's internal
interfaces.
"""
return self.trans_model.validate()[1]
def commit(self, new_name):
"""Verifies internal interface consistency before returning new
model spec.
"""
if self.changelog == []:
raise PyDSTool_ValueError("No changes made")
validated, inconsistencies = self.trans_model.validate()
if validated:
self.trans_model.name = new_name
self.trans_model.orig_name = self.orig_model.name
self.trans_model.changelog = copy.copy(self.changelog)
return self.trans_model
else:
print("Internal interface inconsistencies: %r" % inconsistencies)
raise PyDSTool_ValueError("New Model spec cannot be committed")
class GenTransform(object):
"""Generator Transformer class.
Acts on GDescriptor objects that define Generators.
For these, the only non-trivial transformations are inside the modelspec
attribute.
"""
def __init__(self, name, gen, model_icvalues=None, model_parvalues=None,
model_inputs=None):
if not isinstance(gen, GDescriptor):
raise TypeError("GenTransform must be initialized with a "
"GDescriptor object")
self.orig_gen_name = name
self.orig_gen = gen
self.trans_gen = copy.deepcopy(gen)
self.changelog = []
if model_icvalues is None:
self.model_icvalues = {}
else:
self.model_icvalues = model_icvalues
if model_parvalues is None:
self.model_parvalues = {}
else:
self.model_parvalues = model_parvalues
if model_inputs is None:
self.model_inputs = {}
else:
self.model_inputs = model_inputs
def remove(self, obj):
"""Remove component, parameter, variable, input, function"""
self.trans_gen.modelspec.remove(obj)
self.changelog.append(common.args(action='remove', target=obj.name))
def add(self, parent_name, obj):
"""Add component, parameter, variable, input, function"""
# resolve parent_name structure
self.trans_gen.modelspec.add(obj, parent_name)
self.changelog.append(common.args(action='add', target=obj.name))
def findStaticVars(self):
"""Find RHSfuncSpec variables with RHS=0"""
return [v for v in self.trans_gen.modelspec.search(Var) if \
self.trans_gen.modelspec[v].spec.specStr == '0']
def changeTargetGen(self, target):
"""Change target generator type. Target is a string name of the Generator
class."""
self.trans_gen.target = target
def changeDomain(self, obj_name, domain):
"""Change valid domain of a quantity"""
try:
self.trans_gen.modelspec[obj_name].setDomain(domain)
except (KeyError, AttributeError):
raise PyDSTool_TypeError("Invalid quantity for domain change")
self.changelog.append(common.args(action='changeDomain', \
target=obj_name, pars=(domain,)))
def redefineQuantity(self, obj_name, specstr):
"""Redefine a Quantity using a new specification string,
leaving its type unchanged.
"""
try:
obj = self.trans_gen.modelspec[obj_name]
except KeyError:
raise PyDSTool_ValueError("Unknown object")
try:
obj.spec.redefine(specstr)
except AttributeError:
raise PyDSTool_TypeError("Invalid quantity for redefinition")
self.trans_gen.modelspec.remove(obj_name)
if parseUtils.isHierarchicalName(obj_name):
parts = obj_name.split(parseUtils.NAMESEP)
parent_name = ".".join(parts[:-1])
obj.rename(".".join(parts[1:]))
else:
parent_name = None
self.trans_gen.modelspec.add(obj, parent_name)
self.changelog.append(common.args(action='redefineQuantity', \
target=obj_name, pars=(specstr,)))
def convertQuantity(self, obj_name, targetType, targetSpecType=None):
"""Convert quantity between parameter, variable, or input types.
If parameter -> variable, the RHS will be set to zero ('static'
variable).
"""
try:
obj = self.trans_gen.modelspec[obj_name]
except KeyError:
raise PyDSTool_ValueError("Unknown object")
if parseUtils.isHierarchicalName(obj_name):
parent_name = obj_name.split(parseUtils.NAMESEP)[0]
else:
parent_name = ''
try:
currentType = obj.typestr
assert currentType in ('par', 'var', 'input')
assert targetType in ('par', 'var', 'input')
except (AttributeError, AssertionError):
raise PyDSTool_TypeError("Only convert between parameter, variable or "
"input quantity types")
if targetType == currentType:
if currentType != 'var' or obj.specType is None:
# either (1) par->par, (2) input->input, or
# (3) var->var with no specType to change
# In any of these cases, nothing to do
return
if currentType == 'var':
assert obj.specType in ('RHSfuncSpec', 'ExpFuncSpec'), \
"Cannot process implicit function specs"
if targetType == 'var':
assert targetSpecType in ('RHSfuncSpec', 'ExpFuncSpec'), \
"target specType must be RHSfuncSpec of ExpFuncSpec only"
if targetType == 'par':
if currentType == 'var' and obj_name in self.model_icvalues:
# use existing initial condition for variable as parameter value
new_obj = Symbolic.Par(repr(self.model_icvalues[obj_name]),
obj.name, domain=obj.domain)
#del(self.trans_gen.icvalues[obj_name])
else:
#if currentType == 'input' and name in self.model_inputs:
# del(self.model_inputs[obj_name])
new_obj = Symbolic.Par(obj.name, domain=obj.domain)
elif targetType == 'input':
#if currentType == 'var' and name in self.model_icvalues:
# del(self.model_icvalues[name])
#elif currentType == 'par' and name in self.model_parvalues:
# del(self.model_parvalues[name])
new_obj = Symbolic.Input(obj.name, domain=obj.domain)
elif targetType == 'var':
new_obj = Symbolic.Var('0', obj_name, domain=obj.domain,
specType=targetSpecType)
if currentType == 'par':
try:
val = float(obj.spec())
except ValueError:
if obj_name in self.model_parvalues:
val = self.model_parvalues[obj_name]
else:
val = None
if val is not None:
# par had a value already, so use that for the
# initial condition of this var
self.model_icvalues[obj_name] = val
#elif currentType == 'input' and name in self.model_inputs:
# del(self.model_inputs[obj_name])
else:
raise PyDSTool_TypeError("Invalid conversion")
self.trans_gen.modelspec.remove(obj_name)
self.trans_gen.modelspec.add(new_obj, parent_name)
self.changelog.append(common.args(action='convertQuantity',
target=obj_name,
pars=(targetType, targetSpecType)))
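# Illustration only (hypothetical names): converting a parameter 'k' into a
# static variable could be requested as
#     gt.convertQuantity('k', 'var', targetSpecType='RHSfuncSpec')
# which installs Var('0', 'k') in the modelspec and, if the parameter already
# had a numeric value, records it as the variable's initial condition in
# model_icvalues.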
def convertComponent(self, obj_name, targetType):
"""Convert component object to given type (provide actual type),
provided the new type is compatible with the old one.
"""
try:
obj = self.trans_gen.modelspec[obj_name]
except KeyError:
raise PyDSTool_ValueError("Unknown object")
if parseUtils.isHierarchicalName(obj_name):
parent_name = obj_name.split(parseUtils.NAMESEP)[0]
else:
parent_name = ''
currentType = common.className(obj)
if not isinstance(obj, ModelSpec.ModelSpec):
raise PyDSTool_TypeError("Only convert ModelSpec Component objects")
if targetType == currentType:
# nothing to do
return
if not obj.compatibleContainers == targetType.compatibleContainers or \
not obj.compatibleSubcomponents == targetType.compatibleSubcomponents:
raise PyDSTool_TypeError("Only convert to equivalently-compatible type")
new_obj = targetType(obj.name)
new_obj.__dict__.update(obj.__dict__)
self.trans_gen.modelspec.remove(obj)
self.trans_gen.modelspec.add(new_obj, parent_name)
self.changelog.append(common.args(action='convertComponent', target=obj.name,
pars=(common.className(targetType),)))
def makeStaticVar(self, obj_name):
"""Force RHSfuncSpec variable to have RHS=0.
"""
try:
obj = self.trans_gen.modelspec[obj_name]
except KeyError:
raise PyDSTool_ValueError("Unknown object")
if parseUtils.isHierarchicalName(obj_name):
parent_name = obj_name.split(parseUtils.NAMESEP)[0]
else:
parent_name = ''
if obj.typestr != 'var' or obj.specType != 'RHSfuncSpec':
raise PyDSTool_TypeError("Invalid variable object passed")
new_obj = Symbolic.Var('0', obj.name, domain=obj.domain,
specType='RHSfuncSpec')
self.trans_gen.modelspec.remove(obj)
self.trans_gen.modelspec.add(new_obj, parent_name)
self.changelog.append(common.args(action='makeStaticVar',
target=obj.name))
def unresolved(self):
"""Returns the generator spec's remaining free symbols.
"""
return self.trans_gen.validate()[1]
def commit(self, new_name):
"""Verifies completeness of definition before | |
in list(self.config_dict_base[key].keys()):
yangpath=self.model_url_name_map[self.config_dict_base[key]["@xmlns"]]["module"][0]
self.grpc_getparam_list_base.append(yangpath+":"+key)
else:
self.config_dict_base.pop(key)
elif (dict_type == "candidate"):
for key in list(self.config_dict.keys()):
if isinstance(self.config_dict[key], list):
for idx,item in enumerate(self.config_dict[key]):
if "@xmlns" in list(item.keys()):
if item["@xmlns"] in list(self.model_url_name_map.keys()):
yangpath= self.model_url_name_map[item["@xmlns"]]["module"][0]
self.grpc_getparam_list.append(yangpath+":"+key)
else:
self.config_dict[key].pop(idx)
else:
if self.config_dict[key]["@xmlns"] in list(self.model_url_name_map.keys()):
if "@xmlns" in list(self.config_dict[key].keys()):
yangpath=self.model_url_name_map[self.config_dict[key]["@xmlns"]]["module"][0]
self.grpc_getparam_list.append(yangpath+":"+key)
else:
self.config_dict.pop(key)
except Exception as e:
print("Failed to extract gRPC params, error: "+str(e))
sys.exit(1)
def grpc_get_config(self, dict_type=None):
if dict_type is None:
print("Dictionary type not specified - specify \"base\" or \"candidate\", aborting...")
sys.exit(1)
if (dict_type == "base"):
for pathyang in self.grpc_getparam_list_base:
path = '{"'+pathyang+'": [null]}'
try:
err, result = self.grpc_client.getconfig(path)
if err:
print(err)
sys.exit(1)
self.grpc_json_base.update(json.loads(result))
except Exception as e:
print(
'Unable to perform gRPC get_config, error: ' + str(e)
)
sys.exit(1)
elif (dict_type == "candidate"):
for pathyang in self.grpc_getparam_list:
path = '{"'+pathyang+'": [null]}'
try:
err, result = self.grpc_client.getconfig(path)
if err:
print(err)
sys.exit(1)
self.grpc_json.update(json.loads(result))
except Exception as e:
print(
'Unable to perform gRPC get_config, error: ' + str(e)
)
sys.exit(1)
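    # Illustrative note (not part of the original code): each entry collected in
    # grpc_getparam_list is a "module:container" string, and the loop above wraps
    # it into the single-key JSON path expected by getconfig(). For a hypothetical
    # entry "Cisco-IOS-XR-ifmgr-cfg:interface-configurations" the request path is
    #     {"Cisco-IOS-XR-ifmgr-cfg:interface-configurations": [null]}
    # i.e. "give me the whole subtree under that top-level container".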
def grpc_config_merge(self, data, retry=3, interval=150):
if not self.netconf_only:
count=0
result=False
while (count < retry):
try:
response = self.grpc_client.mergeconfig(data)
if response.errors:
err = json.loads(response.errors)
print("Failed to merge configuration via gRPC, error: "+str(err))
result=False
count=count+1
time.sleep(interval)
continue
result=True
break
except Exception as e:
print(
'Failed to merge configuration via gRPC, error:'+str(e)
)
result=False
count=count+1
time.sleep(interval)
if not result:
print(data)
return result
else:
if self.debug:
print("Skipping gRPC config merge since gRPC port wasn't specified")
def grpc_check_last_cli_commit(self):
if not self.netconf_only:
try:
last_commit="show configuration commit list 1 detail"
response = self.grpc_client.showcmdtextoutput(last_commit)
for line in response:
print(line)
last_commit_changes="show configuration commit changes last 1"
response = self.grpc_client.showcmdtextoutput(last_commit_changes)
for line in response:
print(line)
except Exception as e:
print("Failed to fetch last commit data, error:"+str(e))
sys.exit(1)
else:
if self.debug:
print("Skipping gRPC last cli commit check since gRPC port wasn't specified")
def write_nc_xml_to_file(self, filepath, dict_type=None):
if dict_type is None:
print("Specify dict type before writing xml to file, options: \"base\" or \"candidate\", aborting")
sys.exit(1)
with open(filepath, 'w') as yang_xml_fd:
if (dict_type == "base"):
yang_xml_fd.write(xmltodict.unparse(self.nc_dict_base, pretty=True))
elif (dict_type == "candidate"):
yang_xml_fd.write(xmltodict.unparse(self.nc_dict, pretty=True))
print("Router's CLI configuration converted into YANG XML and saved in file: "+str(filepath))
def write_grpc_json_to_file(self, filepath):
if not self.netconf_only:
with open(filepath, 'w') as yang_json_fd:
yang_json_fd.write(json.dumps(self.grpc_json, indent=4))
print("Router's CLI configuration converted into YANG JSON and saved in file: "+str(filepath))
else:
if self.debug:
print("Skipping gRPC file creation since gRPC port wasn't specified")
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--server', action='store', dest='host',
help='IP address of netconf server and gRPC server on the router')
parser.add_argument('-n', '--netconf-port', action='store', dest='nc_port',
help='netconf port')
parser.add_argument('-g', '--grpc-port', action='store', dest='grpc_port',
help='gRPC port -- IMPORTANT: Not supported in this version. Support using GNMI will be brought in soon.')
parser.add_argument('-l', '--xr-lnx-ssh-port', action='store', dest='xr_lnx_ssh_port',
help='XR linux shell SSH port')
parser.add_argument('-u', '--username', action='store', dest='username',
help='IOS-XR AAA username')
parser.add_argument('-p', '--password', action='store', dest='password',
help='IOS-XR AAA password')
parser.add_argument('-c', '--input-cli-file', action='store', dest='input_cli_file',
help='Specify input file path for CLI configuration to convert into netconf RPC ')
parser.add_argument('-b', '--base-config-file', action='store', dest='base_config_file',
help='Specify file path for base CLI configuration to apply to device before starting, by default: ./base.config')
parser.add_argument('-d', '--debug', action='store_true', dest='debug',
help='Enable debugging')
parser.add_argument('-t', '--test-merge', action='store_true', dest='test',
help='Test config merge with each output file')
parser.add_argument('-x', '--nc-xml-file', action='store', dest='nc_xml_file',
help='Specify output file path for netconf based XML output ')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='Enable verbose logging - useful for debugging ncclient RPCs')
parser.add_argument('-j', '--grpc-json-file', action='store', dest='grpc_json_file',
help='Specify output file path for gRPC based JSON output')
parser.add_argument('-o', '--openconfig', action='store_true', dest='openconfig',
help='Enable translation of CLI into openconfig model - by default it is off. This is done because not all XR platforms respond with an Openconfig equivalent in GET requests but do respond with Native model formats. Also some Openconfig models have been in flux and testing the models sometimes fails. \n If it works, try the -o flag along with the -t flag. If test fails, use the -o flag without the -t flag to at least get the openconfig equivalent where possible. Else skip the -o flag altogether.')
except SystemExit:
print("Invalid arguments provided, Error: " + str(sys.exc_info()[1]))
parser.print_help()
results = parser.parse_args()
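    # Example invocation (script name and all values are placeholders):
    #   python cli2yang.py -s 192.0.2.10 -n 830 -u admin -p secret \
    #       -c ./input.cli -b ./base.config -x ./yang_nc.xml -t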
if not ( results.host or
results.nc_port or
results.grpc_port or
results.xr_lnx_ssh_port or
results.input_cli_file or
results.base_config_file or
results.username or
results.password or
results.debug or
results.openconfig or
results.verbose or
results.test):
parser.print_help()
sys.exit(0)
if results.debug:
rootLogger = logging.getLogger('ncclient.transport.session')
rootLogger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
rootLogger.addHandler(handler)
if results.verbose:
logging.basicConfig(level=logging.DEBUG)
if results.openconfig:
enable_openconfig = True
print("WARNING: not all XR platforms respond with Openconfig equivalent in GET requests but do respond with Native model formats. Also some Openconfig models have been in flux and testing the models sometimes fails. \n If it works, try the -o flag along with the -t flag. If test fails, use the -o flag without the -t flag to atleast get the openconfig equivalent where possible. Else skip the -o flag altogether.")
else:
enable_openconfig = False
if results.nc_xml_file:
nc_xml_file=results.nc_xml_file
else:
nc_xml_file='./yang_nc.xml'
if results.grpc_json_file:
grpc_json_file=results.grpc_json_file
else:
grpc_json_file='./yang_grpc.json'
if results.base_config_file:
base_config_file=results.base_config_file
else:
base_config_file='./base.config'
client=YangCLIClient(host=results.host,
nc_port=results.nc_port,
grpc_port=results.grpc_port,
xr_lnx_ssh_port=results.xr_lnx_ssh_port,
cli_config_file=results.input_cli_file,
base_config_file=base_config_file,
username=results.username,
password=<PASSWORD>,
debug=results.debug)
print("Determining diff between original YANG XML (base config) and current YANG XML (input CLI config)...")
client.list_diff(protocol="netconf")
print("Resetting the router configuration back to its original state...")
#Finally rewrite original configuration of the device
client.cli_replace_config(client.original_config_file)
xml_dict = OrderedDict()
if 'dictionary_item_added' in client.netconf_diff:
dict_items = list(client.netconf_diff['dictionary_item_added'])
for item in dict_items:
item_keys = item[item.startswith("root") and len("root"):]
item_keys = item_keys.strip('[]').replace('][',',')
item_key_list = item_keys.split(',')
item_key_list = [i.replace('\'', '') for i in item_key_list]
dict_to_modify = copy.deepcopy(client.nc_dict)
diff_dict = OrderedDict([(item_key_list[-1], getFromDict(client.nc_dict, item_key_list))])
#diff_dict.update(getTreeFromDictPath(dict_to_modify, item_key_list))
dict_path = OrderedDict()
dict_path.update(getTreeFromDictPath(dict_to_modify, item_key_list))
xml_dict.update(dict_path['config'])
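    # Illustrative note (not part of the original code): DeepDiff reports added
    # keys as path strings such as "root['config']['interface-configurations']"
    # (example only). The slice/strip/split steps above turn that into the key
    # list ['config', 'interface-configurations'], which getFromDict() and
    # getTreeFromDictPath() then walk to rebuild just the changed subtree.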
if 'iterable_item_added' in client.netconf_diff:
iterable_items = list(client.netconf_diff['iterable_item_added'])
iterable_dict = OrderedDict()
list_dict = {}
for item in iterable_items:
item_keys = item[item.startswith("root") and len("root"):]
item_keys = item_keys.strip('[]').replace('][',',')
item_key_list = item_keys.split(',')
item_key_list = [i.replace('\'', '') for i in item_key_list]
list_dict.update({('_').join(item_key_list[:-1]): [] })
for item in iterable_items:
item_keys = item[item.startswith("root") and len("root"):]
item_keys = item_keys.strip('[]').replace('][',',')
item_key_list = item_keys.split(',')
item_key_list = [i.replace('\'', '') for i in item_key_list]
list_index = item_key_list[-1]
item_key_list = item_key_list[:-1]
fetch_list = list(getFromDict(client.nc_dict, item_key_list))
diff_element = fetch_list[int(list_index)]
list_dict[('_').join(item_key_list)].append(diff_element)
dict_to_modify = copy.deepcopy(client.nc_dict)
iterable_dict.update(getTreeFromDictPath(dict_to_modify, item_key_list))
changeInDict(iterable_dict, item_key_list[:-1], item_key_list[-1], list_dict[('_').join(item_key_list)])
xml_dict.update(iterable_dict['config'])
if 'values_changed' in client.netconf_diff:
values_dict = OrderedDict()
values_changed = client.netconf_diff['values_changed']
for item in values_changed:
item_keys = item[item.startswith("root") and len("root"):]
item_keys = item_keys.strip('[]').replace('][',',')
item_key_list = item_keys.split(',')
item_key_list = [i.replace('\'', '') for i in item_key_list]
dict_to_modify = copy.deepcopy(client.nc_dict)
values_dict.update(getTreeFromDictPath(dict_to_modify, item_key_list))
xml_dict.update(values_dict['config'])
if 'type_changes' in client.netconf_diff:
values_dict = OrderedDict()
for item in client.netconf_diff['type_changes']:
new_item_type = client.netconf_diff['type_changes'][item]['new_type']
new_item_value = client.netconf_diff['type_changes'][item]['new_value']
item_keys = item[item.startswith("root") and len("root"):]
item_keys = item_keys.strip('[]').replace('][',',')
item_key_list = item_keys.split(',')
item_key_list = [i.replace('\'', '') for i in item_key_list]
dict_to_modify = copy.deepcopy(client.nc_dict)
setInDict(dict_to_modify, item_key_list, new_item_value)
values_dict.update(getTreeFromDictPath(dict_to_modify, item_key_list))
xml_dict.update(values_dict['config'])
if not enable_openconfig:
for key in list(xml_dict.keys()):
if isinstance(xml_dict[key], list):
index_cnt = 0
for index in xml_dict[key]:
if "http://openconfig" in index['@xmlns']:
popped_openconfig_model = xml_dict[key].pop(index_cnt)
if client.debug:
print("Openconfig enable flag is not set (-o), popping the following model out")
popped_openconfig_xml = OrderedDict([(key, popped_openconfig_model)])
print(xmltodict.unparse(popped_openconfig_xml, pretty=True))
index_cnt=index_cnt+1
else:
if "http://openconfig" in xml_dict[key]['@xmlns']:
try:
popped_openconfig_model = xml_dict.pop(key)
if client.debug:
print("Openconfig enable flag is not set (-o), popping the following model out")
popped_openconfig_xml = OrderedDict([(key, popped_openconfig_model)])
print(xmltodict.unparse(popped_openconfig_xml, pretty=True))
except Exception as e:
print("Failed to pop openconfig model, error is")
print(e)
# Bail out if xml_dict is empty which implies there is no diff and the input cli string is already part of base config
if not len(list(xml_dict.keys())):
print("The diff is empty implying that the input cli snippet is already part of base config. Please check your inputs.")
sys.exit(1)
xml_dict = OrderedDict([('config', xml_dict)])
print("##################################################")
print("YANG XML version of the input CLI configuration:")
print("##################################################")
print(xmltodict.unparse(xml_dict, pretty=True))
print("Testing the generated YANG XML by doing a merge config....")
if results.test:
if not client.nc_config_merge(xmltodict.unparse(xml_dict, pretty=True).encode('utf-8')):
print("Successful!!")
print("The CLI configuration created by applying the generated YANG XML is...\n\n")
print(str(client.cli_show_command(show_cmd="show configuration commit changes last 1")["output"]))
else:
print("Failed to merge configuration using generated XML files, check for error messages above...")
sys.exit(1)
filepath = nc_xml_file
print("Input CLI configuration converted into YANG XML and saved in file: "+str(filepath))
with open(filepath, 'w') as yang_xml_fd:
yang_xml_fd.write(xmltodict.unparse(xml_dict, pretty=True))
with open('./yang_nc_base.xml',
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
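# Rough sketch (added for illustration, not taken from the library source): one way
# a SaltAndPepper-style augmenter could be expressed directly with
# ReplaceElementwise. The Beta(0.5, 0.5)*255 replacement distribution is an
# assumption chosen to push replaced pixels towards the black/white extremes, not
# necessarily what iaa.SaltAndPepper uses internally.
def _sketch_salt_and_pepper_equivalent(p=0.5, per_channel=False):
    return iaa.ReplaceElementwise(
        mask=p,
        replacement=iap.Beta(0.5, 0.5) * 255,
        per_channel=per_channel)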
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
density_expected = 1.0 / nb_bins
for nb_seen in hist:
density = nb_seen / len(ps)
assert density_expected - tolerance < density < density_expected + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyways
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
density_expected = 1.0 / nb_bins
for nb_seen in hist:
density = nb_seen / len(ps)
assert density_expected - tolerance < density < density_expected + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Pepper is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
density_expected = 1.0 / nb_bins
for nb_seen in hist:
density = nb_seen / len(ps)
assert density_expected - tolerance < density < density_expected + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarsePepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class Test_invert(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr)
assert observed == "foo"
args
from datetime import datetime
import discord
import itertools
from .utils import formatString, getUsageEmbed, getOopsEmbed
# IDEAS
# 1. Paying out points (without bets)
class DiscordPoints:
"""
Class that parses Discord Points info and interactions
Attributes
__________
fire (Fire obj): The fire instance where information is fetched/updated
Functions
__________
async getDiscordPointsEmbed(page, guild) -> (discord.Embed)
Makes an embedded message with total points for each user
def createNewReward(guild, rewardString) -> (discord.Embed)
Adds a reward and returns the updated list of rewards as an embedded msg
"""
fire = None
def __init__(self, fire):
self.fire = fire
async def getDiscordPointsEmbed(self, page, guild):
"""
Makes an embedded message with DiscordPoints for each member in the guild
Parameters
----------
page : int
Page of the leaderboard to display (20 entries per page)
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message of Discord Points for each member of the guild
"""
d = self.fire.fetchDiscordPoints(guild)
# This sorts the dictionary by highest-value and converts it to a list
# It takes form [(user_0.id, value_0) ...(user_n.id, value_n)]
info_arr = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
userString, pointsString, description = await self.__createdEmbedStrings(guild, info_arr, page)
title = "Discord Points"
return self.__createPointsEmbed(title, description, userString, pointsString)
def createNewReward(self, guild, rewardString):
"""
Create new reward for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
rewardString : string
String with the reward title and cost
Returns
----------
discord.Embed
Embedded message of the updated rewards for the server
"""
rewardStringList = ["".join(x) for _, x in itertools.groupby(rewardString, key=str.isdigit)]
if len(rewardStringList) < 2:
return getUsageEmbed(
"-addreward [Desired Reward] [Price of the Reward]\n\nexample: -addreward CSGO with friends 500")
try:
rewardCost = int(rewardStringList[len(rewardStringList) - 1])
rewardTitle = self.__parseRewardStringList(rewardStringList)
self.fire.postNewReward(guild, rewardTitle, rewardCost)
return self.getRewardsEmbed(guild)
except Exception as e:
print("ERROR ", e)
return getUsageEmbed(
"-addreward [Desired Reward] [Price of the Reward]\n\nexample: -addreward CSGO with friends 500")
def getRewardsEmbed(self, guild):
"""
Get all of the current rewards for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message with all of the rewards for the guild
"""
rewards_dict = self.fire.fetchAllRewards(guild)
if rewards_dict == {}:
return self.__noRewardsEmbed(guild)
rewardsList = [(k, rewards_dict[k]) for k in sorted(rewards_dict, key=rewards_dict.get, reverse=True)]
idString, rewardsString, costsString = self.__getRewardsEmbedStrings(rewardsList)
return self.__createRewardsEmbed(idString, rewardsString, costsString)
def redeemReward(self, guild, user, reward_id):
"""
Redeems the desired reward with DiscordPoints
[@Todo: Ping Users associated with the reward]
Parameters
----------
guild : discord.Guild
The server that we want to get information from
user : discord.Member if in guild, discord.User otherwise
The user that redeemed the reward
reward_id : Int
The id of the reward to redeem
Returns
----------
discord.Embed
Embedded message with the redeemed reward
"""
points_dict = self.fire.fetchDiscordPoints(guild)
rewards_dict = self.fire.fetchAllRewards(guild)
rewards_list = [(k, rewards_dict[k]) for k in sorted(rewards_dict, key=rewards_dict.get, reverse=True)]
try:
# Check to see if the reward_id is within the list of rewards
if int(reward_id) > len(rewards_list) or int(reward_id) < 1:
return self.__createNotARewardEmbed()
reward_title = rewards_list[int(reward_id) - 1][0]
reward_cost = rewards_list[int(reward_id) - 1][1]
# Check to see if the user has enough points to redeem the reward
if points_dict.get(str(user.id), 0) < reward_cost:
return self.__createNotEnoughPointsEmbed(user, points_dict.get(str(user.id), 0))
else:
new_points = points_dict[str(user.id)] - reward_cost
self.fire.postNewDiscordPoints(guild, str(user.id), new_points)
return self.__createRedeemRewardEmbed(reward_title, reward_cost, user, new_points)
except Exception as e:
print(e)
return getUsageEmbed("-redeemReward [Desired Reward Id]\n\nexample: -redeemReward 3")
def addPoints(self, guild, author, user, points):
"""
add Points to a specific User
[@Todo: Ping Users associated with the points]
Parameters
----------
guild : discord.Guild
The server that we want to get information from
author : message.user
user : discord.Member if in guild, discord.User otherwise
The user that redeemed the reward
points : Int
The amount of points
Returns
----------
discord.Embed
Embedded message with the redeemed reward
"""
points_dict = self.fire.fetchDiscordPoints(guild)
print(user.id)
try:
if not str(user.id) in points_dict:
return getOopsEmbed("User ID not correct")
elif not author.guild_permissions.administrator:
return getOopsEmbed("Command can only be used by Server-Admins")
print(points_dict[str(user.id)])
new_points = points_dict[str(user.id)] + int(points)
print(new_points)
self.fire.postNewDiscordPoints(guild, str(user.id), new_points)
return self.__createPointsEmbed("Points added", "Points were added to balance", f"{user}", f"{new_points}")
except Exception as e:
print(e)
print("Error adding points")
return getOopsEmbed("Error adding points, check console")
# ---------- MARK: - Private Functions ----------
async def __createdEmbedStrings(self, guild, sortedList, page):
"""
Private helper function to create strings for the embedded message
Parameters
----------
guild : (discord.Guild)
The server that we are tracking
sortedList : arr[(key_0, val_0) ... (key_n, val_n)]
The sorted (by val) list of key, val pairs where key: user_id, val: points
page : (int)
Page of the message we want to look at (20 entries per page)
Returns
----------
discord.Embed
Formatted information embedded into a message
"""
member_dict = await self.fire.fetchAllMembers(guild)
# Max 20 entries / page
pages = len(sortedList) // 20 + 1
userString = ""
pointsString = ""
rankString = ""
if page > pages or page < 0:
page = 1
for i in range(0, 20):
shiftedIndex = (page - 1) * 20 + i
if shiftedIndex < len(sortedList):
user_id = sortedList[shiftedIndex][0]
points = sortedList[shiftedIndex][1]
if int(user_id) in member_dict.keys():
userString += member_dict[int(user_id)] + '\n'
pointsString += str(points) + '\n'
description = "Page " + str(page) + " of " + str(page)
return userString, pointsString, description
def __createPointsEmbed(self, title, description, userString, pointsString):
"""
Formats information into an embedded message
Parameters
----------
title: (str)
Title for the embedded message
description: (str)
Description for the embedded message
userString: (str)
String representing the list of ordered users
pointsString: (str)
String representing the list of ordered points
Returns
----------
discord.Embed
Formatted information embedded into a message
"""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Username", value=userString)
embed.add_field(name="Discord Points", value=pointsString)
return embed
def __noRewardsEmbed(self, guild):
"""
Private function that shows that there are no rewards yet for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message that states no rewards are in the guild
"""
now = datetime.today()
embed = discord.Embed(title="Oops!", description="", timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="No Rewards Set Yet!",
value="To add a reward:\n-addreward [Desired Reward] [Price of the Reward]")
return embed
def __getRewardsEmbedStrings(self, rewardsList):
"""
Private function that gets formatted strings for the list of rewards
Parameters
----------
rewardsList: [(reward_title_0, cost_0)...]
List of rewards sorted by the highest cost
Returns
----------
idString: string
String representing the id's of the rewards separated by '\n'
rewardString: string
String representing the title of the rewards separated by '\n'
costString: string
String representing the costs of the rewards separated by '\n'
"""
idString = ""
rewardString = ""
costString = ""
for i in range(len(rewardsList)):
numLines, formattedRewardString = formatString(str(rewardsList[i][0]))
idString += str(i + 1) + ("\n" * numLines)
rewardString += formattedRewardString + "\n"
costString += str(rewardsList[i][1]) + ("\n" * numLines)
return idString, rewardString, costString
def __createRewardsEmbed(self, idString, rewardString, costString):
"""
Private function to help create a rewards embed
Parameters
----------
idString: string
String representing the id's of the rewards separated by '\n'
rewardString: string
String representing the title of the rewards separated by '\n'
costString: string
String representing the costs of the rewards separated by '\n'
Returns
----------
discord.Embed
Embedded message that states all of the rewards
"""
title = "Discord Point Rewards"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="ID", value=idString)
embed.add_field(name="Reward", value=rewardString)
embed.add_field(name="Price", value=costString)
return embed
def __createRedeemRewardEmbed(self, reward_title, reward_cost, user, new_points):
"""
Private function to help create a redeem reward embed
Parameters
----------
reward_title: string
Title of the reward to be redeemed
reward_cost : int
Cost of the reward to be redeemed
user : discord.Member if in guild, discord.User otherwise
User_id of the user that redeemed the reward
Returns
----------
discord.Embed
Embedded message that states the redeemed reward
"""
title = "Reward Redeemed"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_thumbnail(url=user.avatar_url)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Reward", value=reward_title, inline=False)
embed.add_field(name="Price", value=reward_cost, inline=False)
embed.add_field(name="Points Remaining", value=str(new_points), | |
# ContestAnalyzerOnline/views.py
from django.shortcuts import render, redirect
from string import ascii_uppercase
import itertools
from ContestAnalyzerOnline.forms import ContestForm
from pycontestanalyzer.contest import Contest
from pycontestanalyzer.utils.downloads.logs import get_list_of_logs
from pycontestanalyzer.utils.contest import retrieve_contest_object
from pycontestanalyzer.plot_dictionary import plot_dictionary
import logging
logging.basicConfig(format='%(levelname)s: %(asctime)s UTC - views.py : %(message)s', level=logging.DEBUG)
# Define output folder
output_folder = "./data/"
# ________________________________________________________________________________________________________
def index(request):
if request.method == 'POST':
form = ContestForm(request.POST)
if form.is_valid():
request.session['cleaned_data'] = form.cleaned_data
if "new_callsign" in request.session.keys():
del request.session['new_callsign']
return render(request, 'processing.html', {'form': form})
else:
form = ContestForm()
return render(request, 'index.html', {'form': form})
# ________________________________________________________________________________________________________
def process(request):
# Get info from form
search_info = request.session['cleaned_data']
contest_type = search_info["name"]
callsign = search_info["callsign"].upper()
year = search_info["year"]
mode = search_info["mode"]
# Get call from availablecalls.html page. Store it in new_callsign so that it's available everywhere.
if request.GET.get('call'):
callsign = str(request.GET.get('call'))
request.session['new_callsign'] = callsign
# Download log and process it
contest = Contest(
contest=contest_type,
year=year,
mode=mode,
callsign=callsign,
output_folder=output_folder
)
# The call sign does not exist, provide list of available call signs
if not contest.call_exists:
list_of_calls = get_list_of_logs(contest_type=contest_type, year=year, mode=mode)
list_of_calls.sort()
calls_dict = {}
for number in range(1,10):
calls_dict[str(number)] = []
for call in list_of_calls:
if call[0] == str(number):
calls_dict[str(number)].append(call)
for letter in ascii_uppercase:
calls_dict[letter] = []
for call in list_of_calls:
if call[0] == letter:
calls_dict[letter].append(call)
return render(request, 'analysis_availablecalls.html', {'contest': contest, 'callsDict':sorted(calls_dict.items())})
# Check whether reverse beacon spots have been downloaded correctly
if not contest.download_spots_ok:
raise(NameError("Beacon spots not downloaded correctly"))
# Loop over the tools if necessary
logging.info("Process logs...")
contest.process()
logging.info("Logs processed")
return redirect('contestAnalyzer:mainPage')
# ________________________________________________________________________________________________________
def main_page(request):
# --- Get info from form
search_info = request.session['cleaned_data']
if "new_callsign" in request.session.keys():
request.session['cleaned_data']['callsign'] = request.session['new_callsign']
# --- Retrieve contest object from pickle file
contest = retrieve_contest_object(search_info=search_info, output_folder=output_folder)
return render(request, 'analysis_main.html', {'contest': contest, 'nbar':'main'})
# ________________________________________________________________________________________________________
def contest_summary(request):
# Get info from form
search_info = request.session['cleaned_data']
if "new_callsign" in request.session.keys():
request.session['cleaned_data']['callsign'] = request.session['new_callsign']
# Retrieve contest object from pickle file
contest = retrieve_contest_object(search_info=search_info, output_folder=output_folder)
summary_info = []
summary_info_total = []
qsos_cumul = 0
dxcc_cumul = 0
zones_cumul = 0
points_cumul = 0
for band in [10, 15, 20, 40, 80, 160]:
qsos = contest.log[(contest.log["isdupe"]==False) & (contest.log["band"] == band)]["call"].count()
dxcc = contest.log[(contest.log["isdupe"]==False) & (contest.log["band"] == band)]["dxcc"].value_counts().count()
zones = contest.log[(contest.log["isdupe"]==False) & (contest.log["band"] == band)]["mynr"].value_counts().count()
points = contest.log[(contest.log["isdupe"]==False) & (contest.log["band"] == band)]["points"].sum()
qsos_cumul += qsos
dxcc_cumul += dxcc
zones_cumul += zones
points_cumul += points
if qsos>0:
summary_info.append([
band,
qsos,
dxcc,
zones,
points,
float(points)/qsos,
])
else:
summary_info.append([
band,
qsos,
dxcc,
zones,
points,
0.,
])
summary_info_total.append([
"Total",
qsos_cumul,
dxcc_cumul,
zones_cumul,
points_cumul,
float(points_cumul)/qsos_cumul,
(zones_cumul+dxcc_cumul)*points_cumul
])
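    # Note (added for clarity): the last column of the totals row is the claimed
    # score, computed as total QSO points multiplied by the total multiplier count
    # (DXCCs + zones summed over all bands).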
return render(request, 'analysis_summary.html', {'summary_info': summary_info,
'summary_info_total': summary_info_total,
'nbar': 'summary'})
#________________________________________________________________________________________________________
def contest_log(request):
# --- Get info from form
search_info = request.session['cleaned_data']
if "new_callsign" in request.session.keys():
request.session['cleaned_data']['callsign'] = request.session['new_callsign']
# --- Retrieve contest object from pickle file
contest = retrieve_contest_object(search_info=search_info, output_folder=output_folder)
qsos_page = 50
log = contest.log
cumulated_info = []
if request.GET.get('filter'):
rule = str(request.GET['filter']).split(",")
for r in rule:
if "band" in r:
band = int(r.replace("band:", ""))
log = log[log["band"] == band]
cumulated_info.append(r)
if 'call' in r:
call = str(r.replace("call:", ""))
log = log[log["call"] == call]
cumulated_info.append(r)
if 'freq' in r:
freq = float(r.replace("freq:", ""))
log = log[log["frequency"] == freq]
cumulated_info.append(r)
if 'date' in r:
date = str(r.replace("date:", ""))
log = log[log["date"] == date]
cumulated_info.append(r)
if 'time' in r:
time = str(r.replace("time:", ""))
log = log[log["time"] == time]
cumulated_info.append(r)
if 'cont' in r:
cont = str(r.replace("cont:", ""))
log = log[log["continent"] == cont]
cumulated_info.append(r)
if 'dxcc' in r:
dxcc = str(r.replace("dxcc:", ""))
log = log[log["dxcc"] == dxcc]
cumulated_info.append(r)
if 'cq' in r:
zonecq = int(r.replace("cq:", ""))
log = log[log["zonecq"] == zonecq]
cumulated_info.append(r)
if 'points' in r:
points = int(r.replace("points:", ""))
log = log[log["points"] == points]
cumulated_info.append(r)
if 'maxRate_1min' in r:
log = log[log["maxRate_1min"] == 1]
cumulated_info.append(r)
if 'maxRate_5min' in r:
log = log[log["maxRate_5min"] == 1]
cumulated_info.append(r)
if 'maxRate_10min' in r:
log = log[log["maxRate_10min"] == 1]
cumulated_info.append(r)
if 'maxRate_30min' in r:
log = log[log["maxRate_30min"] == 1]
cumulated_info.append(r)
if 'maxRate_60min' in r:
log = log[log["maxRate_60min"] == 1]
cumulated_info.append(r)
if 'maxRate_120min' in r:
log = log[log["maxRate_120min"] == 1]
cumulated_info.append(r)
cumulated_info = ','.join(cumulated_info)
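    # Illustrative note (values are hypothetical): the "filter" query parameter is a
    # comma-separated list of rule:value pairs, e.g.
    #     ?filter=band:20,cont:EU,page:2
    # which keeps only 20 m QSOs with Europe and renders the second page of the log.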
cumulated_unique_band = []
if request.GET.get('unique_band'):
rule = str(request.GET['unique_band']).split(",")
for r in rule:
if 'call' in r:
log = log.groupby(["call", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
if 'freq' in r:
log = log.groupby(["frequency", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
if 'date' in r:
log = log.groupby(["date", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
if 'time' in r:
log = log.groupby(["time", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
if 'cont' in r:
log = log.groupby(["continent", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
if 'dxcc' in r:
log = log.groupby(["dxcc", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
if 'cq' in r:
log = log.groupby(["zonecq", "band"], sort=False, as_index=False).first()
cumulated_unique_band.append(r)
cumulated_unique_band = ','.join(cumulated_unique_band)
cumulated_unique = []
if request.GET.get('unique'):
rule = str(request.GET['unique']).split(",")
for r in rule:
if "band" in r:
log = log.groupby(["band"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'call' in r:
log = log.groupby(["call"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'freq' in r:
log = log.groupby(["frequency"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'date' in r:
log = log.groupby(["date"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'time' in r:
log = log.groupby(["time"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'cont' in r:
log = log.groupby(["continent"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'dxcc' in r:
log = log.groupby(["dxcc"], sort=False, as_index=False).first()
cumulated_unique.append(r)
if 'cq' in r:
log = log.groupby(["zonecq"], sort=False, as_index=False).first()
cumulated_unique.append(r)
cumulated_unique = ','.join(cumulated_unique)
filtered_length = len(log)
page = 1
if request.GET.get('filter'):
rule = str(request.GET['filter']).split(",")
for r in rule:
if "page" in r:
page = int(r.replace("page:", ""))
log = log[(page-1)*qsos_page:page*qsos_page]
num_pages = []
for i in range(1, int(round(filtered_length/float(qsos_page), 0) + 1)):
num_pages.append(i)
log_info = []
for index, row in log.iterrows():
log_info.append([
row["counter"],
row["band"],
row["frequency"],
row["date"],
row["time"],
row["call"],
row["continent"],
row["dxcc"],
row["zonecq"],
row["points"],
])
return render(request, 'analysis_log.html', {"log_info": log_info, 'cumulated_info': cumulated_info,
'cumulated_unique': cumulated_unique,
'cumulated_unique_band': cumulated_unique_band, 'page': page,
'num_pages': num_pages, 'total_pages': len(num_pages), 'nbar': 'log',
'latbar':'log'
}
)
# ________________________________________________________________________________________________________
def contest_rates(request):
# --- Get info from form
search_info = request.session['cleaned_data']
if "new_callsign" in request.session.keys():
request.session['cleaned_data']['callsign'] = request.session['new_callsign']
# --- Retrieve contest object from pickle file
contest = retrieve_contest_object(search_info=search_info, output_folder=output_folder)
rates = contest.max_rates
rates_info = []
rates_info.append(["1", rates["1min"][0], rates["1min"][0]/1., rates["1min"][0]*60./1., str(rates["1min"][1][0]), str(rates["1min"][1][1])])
rates_info.append(["5", rates["5min"][0], rates["5min"][0]/5., rates["5min"][0]*60./5., str(rates["5min"][1][0]), str(rates["5min"][1][1])])
rates_info.append(["10", rates["10min"][0], rates["10min"][0]/10., rates["10min"][0]*60./10., str(rates["10min"][1][0]), str(rates["10min"][1][1])])
rates_info.append(["30", rates["30min"][0], rates["30min"][0]/30., rates["30min"][0]*60./30., str(rates["30min"][1][0]), str(rates["30min"][1][1])])
rates_info.append(["60", rates["60min"][0], rates["60min"][0]/60., rates["60min"][0]*60./60., str(rates["60min"][1][0]), str(rates["60min"][1][1])])
rates_info.append(["120", rates["120min"][0], rates["120min"][0]/120., rates["120min"][0]*60./120., str(rates["120min"][1][0]), str(rates["120min"][1][1])])
return render(request, 'analysis_rates.html', {'rates_info': rates_info, 'nbar': 'rates'})
#________________________________________________________________________________________________________
def contest_rates_per_minute(request):
# Get info from form
search_info = request.session['cleaned_data']
if "new_callsign" in request.session.keys():
request.session['cleaned_data']['callsign'] = request.session['new_callsign']
# Retrieve contest object from pickle file
contest = retrieve_contest_object(search_info=search_info, output_folder=output_folder)
list_rates = contest.ratesPerMinute
range_mins = []
for i in range(60):
range_mins.append(str(i).zfill(2))
days = list(contest.log.groupby("date").groups.keys())
days.sort()
range_hours = []
for i in range(24):
range_hours.append(str(i).zfill(2))
range_hours = range_hours*len(days)
range_days = []
for d in days:
range_days += [d]*24
# --- list_rates is a list made of 48 sub-lists. Each sub-list has 60 rates, for each minute in each hour of the
# contest. Now we will convert each rate in a list of [min, rate]
list_rates_min = []
for h, lh in enumerate(list_rates):
list_rates_min.append([])
for m, lm in enumerate(lh):
list_rates_min[h].append([str(m).zfill(2), lm])
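    # Illustration of the reshaping above: a per-hour list such as [2, 0, 5, ...]
    # becomes [['00', 2], ['01', 0], ['02', 5], ...], pairing every rate with its
    # zero-padded minute label so the template can print them side by side.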
return render(request, 'analysis_rate_per_min.html', {'list_rates': zip(range_days, range_hours, list_rates_min),
'range_mins': range_mins, 'nbar': 'rates'
}
)
# ________________________________________________________________________________________________________
def contest_dxcc_frequency(request):
# --- Get info from form
search_info = request.session['cleaned_data']
if "new_callsign" in request.session.keys():
request.session['cleaned_data']['callsign'] = request.session['new_callsign']
# --- Retrieve contest object from pickle file
contest = retrieve_contest_object(search_info=search_info, output_folder=output_folder)
qsos_page = 10
log = contest.log
cumulated_info = []
if request.GET.get('filter'):
rule = str(request.GET['filter']).split(",")
for r in rule:
if "band" in r:
band = int(r.replace("band:", ""))
log = log[log["band"]==band]
cumulated_info.append(r)
if 'date' in r:
date = str(r.replace("date:", ""))
log = log[log["date"]==date]
cumulated_info.append(r)
if 'time' in r:
time = str(r.replace("time:", ""))
log = log[log["time"]==time]
cumulated_info.append(r)
if 'dxcc' in r:
dxcc = str(r.replace("dxcc:", ""))
log = log[log["dxcc"]==dxcc]
cumulated_info.append(r)
cumulated_info = ','.join(cumulated_info)
page = 1
if request.GET.get('filter'):
rule = str(request.GET['filter']).split(",")
for r in rule:
if "page" in r:
page = int(r.replace("page:", ""))
grouped_counts = log.groupby("dxcc")["dxcc"].count()
counts = grouped_counts.tolist()
names = grouped_counts.index.tolist()
num_pages = []
filtered_length = len(grouped_counts)
for i in range(1, int(round(filtered_length/float(qsos_page), 0) + 1)):
num_pages.append(i)
list_dxcc = []
for n, c in zip(names, counts):
list_dxcc.append([n, c])
from operator import itemgetter
list_dxcc = sorted(list_dxcc, key=itemgetter(1))
list_dxcc = list(reversed(list_dxcc))
list_dxcc = list_dxcc[(page-1)*qsos_page:page*qsos_page]
return render(request, 'analysis_dxccfreq.html', {'list_dxcc': list_dxcc, 'page':page, 'num_pages':num_pages,
                                                              })
import logging
import configparser
from pymongo import MongoClient
from bson import ObjectId
import os
logger = logging.getLogger("MongoDB Controller")
logger.setLevel(logging.INFO)
# create file handler which logs even debug messages
log_file_handler = logging.FileHandler("covid_chatbot.log")
log_file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s,%(msecs)d - %(name)s - %(levelname)s - %(message)s")
log_file_handler.setFormatter(formatter)
logger.addHandler(log_file_handler)
# load configs from config.ini file
config = configparser.ConfigParser(inline_comment_prefixes="#")
config.read(os.path.join(os.path.dirname(__file__), "..", "config.ini"))
db_settings = config["MONGODB"]
try:
logging.info("Loading config settings")
if "username" not in db_settings or db_settings["username"] == "":
db_username = None
logging.warning("Mongodb username is not defined.")
else:
db_username = db_settings["username"]
if "password" not in db_settings or db_settings["password"] == "":
db_password = None
logging.warning("Mongodb password is not defined.")
else:
db_password = db_settings["password"]
if "port" not in db_settings or db_settings["port"] == "":
raise Exception("Mongodb port is not defined.")
else:
db_port = db_settings["port"]
if "address" not in db_settings or db_settings["address"] == "":
raise Exception("Mongodb address is not defined.")
else:
db_address = db_settings["address"]
logging.info("Config settings loaded successfully")
except Exception as e:
    logger.error(str(e))
exit()
if db_username and db_password:
mongo_client = MongoClient("{}:{}".format(db_address, db_port), username=db_username, password=db_password)
elif db_username is None and db_password is None:
mongo_client = MongoClient("{}:{}".format(db_address, db_port))
else:
    logger.error(str(Exception(
        "There is a problem in the MONGODB section of the config.ini file: either username or password is not defined")))
exit()
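# Illustrative sketch (not part of the original module): the settings read above are
# expected in a [MONGODB] section of config.ini, one directory above this file. The
# values below are made-up placeholders; only the key names come from the code above.
#
#   [MONGODB]
#   address = localhost   # host the MongoDB server listens on
#   port = 27017          # MongoDB port
#   username =            # optional; empty means connect without authentication
#   password =            # optional; empty means connect without authentication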
def add_topic(name, subtopics, keywords):
"""
add new topic into db
:param name: name of topic
:param subtopics: subtopics under the topic
:param keywords: keywords under the topic
    :return: id of the inserted topic as a string if the operation is successful, None if an error happens
"""
db = mongo_client.COVIDChatbot_Topics
topic_details = {
"name": name,
"subtopics": subtopics,
"keywords": keywords,
}
collection = db.COVIDChatbot_Topics.insert_one(topic_details)
if str(collection.inserted_id) != "": # if the request is successfully added to the database
return str(collection.inserted_id)
return None
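# Hedged usage sketch (not part of the original module, requires a running MongoDB):
# how add_topic is meant to be called given its signature above. The name, subtopic
# ids and keywords are invented examples; real subtopic ids would come from add_subtopic.
def _example_add_topic():
    topic_id = add_topic(
        name="Vaccines",                      # display name of the topic
        subtopics=["<subtopic-id>"],          # placeholder subtopic id
        keywords=["vaccine", "vaccination"],  # keywords used for matching
    )
    return topic_id  # id string of the inserted document, or None on failure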
def add_subtopic(name, questions_answers, keywords):
"""
add new subtopic into db
    :param name: name of the subtopic
    :param questions_answers: questions/answers under the subtopic
    :param keywords: keywords under the subtopic
    :return: id of the inserted subtopic as a string if the operation is successful, None if an error happens
"""
db = mongo_client.COVIDChatbot_Subtopics
subtopic_details = {
"name": name,
"questions_answers": questions_answers,
"keywords": keywords,
}
collection = db.COVIDChatbot_Subtopics.insert_one(subtopic_details)
if str(collection.inserted_id) != "": # if the request is successfully added to the database
return str(collection.inserted_id)
return None
def add_question_answer(question, answer, more_details, keywords):
"""
add new question/answer into db
:param question: question
:param answer: answer for the question
:param more_details: more details including links, videos, etc
:param keywords: keywords of the question
    :return: id of the inserted question/answer as a string if the operation is successful, None if an error happens
"""
db = mongo_client.COVIDChatbot_QAs
subtopic_details = {
"question": question,
"answer": answer,
"keywords": keywords,
"more_details": more_details,
}
collection = db.COVIDChatbot_QAs.insert_one(subtopic_details)
if str(collection.inserted_id) != "": # if the request is successfully added to the database
return str(collection.inserted_id)
return None
def get_topics():
"""
get list of topics
:return: topics object
"""
db = mongo_client.COVIDChatbot_Topics
query_result = db.COVIDChatbot_Topics.find()
if query_result is not None:
return list(query_result)
return None
def get_topic(id):
"""
find topic with given id
:param id: id of topic
    :return: topic object if found, None if the topic does not exist
"""
db = mongo_client.COVIDChatbot_Topics
query_result = db.COVIDChatbot_Topics.find_one({"_id": ObjectId(str(id))})
if query_result is not None:
return query_result
return None
def get_subtopic(id):
"""
find subtopic with given id
:param id: id of subtopic
    :return: subtopic object if found, None if the subtopic does not exist
"""
db = mongo_client.COVIDChatbot_Subtopics
query_result = db.COVIDChatbot_Subtopics.find_one({"_id": ObjectId(str(id))})
if query_result is not None:
return query_result
return None
def get_question_answer(id):
"""
    find question/answer with given id
    :param id: id of question/answer
    :return: question/answer object if found, None if it does not exist
"""
db = mongo_client.COVIDChatbot_QAs
query_result = db.COVIDChatbot_QAs.find_one({"_id": ObjectId(str(id))})
if query_result is not None:
return query_result
return None
def update_topic(id, subtopics=None, keywords=None):
"""
update topic
    :param id: id of the topic
    :param subtopics: new list of subtopic ids, left unchanged if None
    :param keywords: new list of keywords, left unchanged if None
:return: True if the operation is successful, False if an error happens
"""
db = mongo_client.COVIDChatbot_Topics
if subtopics is not None and keywords is not None:
query_result = db.COVIDChatbot_Topics.update_one({"_id": ObjectId(str(id))},
{
"$set": {
"subtopics": subtopics,
"keywords": keywords,
}
},
upsert=False)
elif subtopics is not None:
query_result = db.COVIDChatbot_Topics.update_one({"_id": ObjectId(str(id))},
{
"$set": {
"subtopics": subtopics,
}
},
upsert=False)
elif keywords is not None:
query_result = db.COVIDChatbot_Topics.update_one({"_id": ObjectId(str(id))},
{
"$set": {
"keywords": keywords,
}
},
upsert=False)
    else:
        return False
    if query_result.modified_count > 0:
        return True
    return False
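# Hedged usage sketch (not part of the original module): update_topic issues a $set for
# whichever fields are given, so either argument can be updated independently. The id
# and values below are placeholders.
def _example_update_topic():
    keywords_only = update_topic("<topic-id>", keywords=["vaccine", "booster"])
    both_fields = update_topic("<topic-id>", subtopics=["<subtopic-id>"], keywords=["vaccine"])
    return keywords_only and both_fields  # True only if both documents were modified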
def update_subtopic(id, questions_answers=None, keywords=None):
"""
update subtopic
    :param id: id of the subtopic
    :param questions_answers: new list of question/answer ids, left unchanged if None
    :param keywords: new list of keywords, left unchanged if None
:return: True if the operation is successful, False if an error happens
"""
db = mongo_client.COVIDChatbot_Subtopics
if questions_answers is not None and keywords is not None:
query_result = db.COVIDChatbot_Subtopics.update_one({"_id": ObjectId(str(id))},
{
"$set": {
"questions_answers": questions_answers,
"keywords": keywords,
}
},
upsert=False)
elif questions_answers is not None:
query_result = db.COVIDChatbot_Subtopics.update_one({"_id": ObjectId(str(id))},
{
"$set": {
"questions_answers": questions_answers,
}
},
upsert=False)
elif keywords is not None:
query_result = db.COVIDChatbot_Subtopics.update_one({"_id": ObjectId(str(id))},
{
"$set": {
"keywords": keywords,
}
},
upsert=False)
    else:
        return False
    if query_result.modified_count > 0:
        return True
    return False
def add_user_to_blacklist(phone_number):
"""
    add a user's phone number to the blacklist of misbehaving users
    :param phone_number: user phone number
    :return: id of the inserted entry as a string if the operation is successful, False if an error happens
"""
db = mongo_client.COVIDChatbot_Misconduct
user_details = {
"phone_number": "{}".format("+" + phone_number if not phone_number.startswith("+") else phone_number),
}
collection = db.COVIDChatbot_Misconduct.insert_one(user_details)
if str(collection.inserted_id) != "": # if the request is successfully added to the database
return str(collection.inserted_id)
return False
def check_user_in_blacklist(phone_number):
"""
check if user is in the blacklist
:param phone_number: user phone number
    :return: user object if the number is blacklisted, None if it is not
"""
db = mongo_client.COVIDChatbot_Misconduct
query_result = db.COVIDChatbot_Misconduct.find_one({"phone_number": "{}".format("+" + phone_number if not phone_number.startswith("+") else phone_number)})
if query_result is not None:
return query_result
return None
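# Hedged usage sketch (not part of the original module): the two blacklist helpers are
# meant to be used together - record a misbehaving number once, then gate incoming
# messages on the lookup. The number below is a placeholder.
def _example_blacklist_flow(phone_number="15551234567"):
    if check_user_in_blacklist(phone_number) is None:
        add_user_to_blacklist(phone_number)  # stored with a leading "+" by the helper
    return check_user_in_blacklist(phone_number) is not None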
def get_handover_volunteers():
"""
get list of volunteers to answer users' queries (handover phone numbers, and language they can speak)
:return: list of volunteers, None if no volunteer registered
"""
db = mongo_client.COVIDChatbot_HandoverNumbers
query_result = db.COVIDChatbot_HandoverNumbers.find()
if query_result is not None:
return list(query_result)
return None
def get_handover_volunteers_by_language(language):
"""
get list of volunteers to answer users' queries for given language
:return: list of volunteers, None if no volunteer registered
"""
db = mongo_client.COVIDChatbot_HandoverNumbers
query_result = db.COVIDChatbot_HandoverNumbers.find({"languages": language})
if query_result is not None:
return list(query_result)
return None
def get_volunteer_details(phone_number):
"""
get volunteer's details
:param phone_number: volunteer's phone number
    :return: volunteer object if found, None if the number is not registered
"""
db = mongo_client.COVIDChatbot_HandoverNumbers
query_result = db.COVIDChatbot_HandoverNumbers.find_one({"phone_number": "{}".format("+" + phone_number if not phone_number.startswith("+") else phone_number)})
if query_result is not None:
return query_result
return None
def add_handover_volunteer(full_name, phone_number, languages):
"""
add a volunteer to the handover list
:param full_name: volunteer's first name and last name
:param phone_number: volunteer's phone number
:param languages: language(s) the person can speak (answer users' queries) - used for matching purposes
    :return: confirmation string with the new volunteer id, a notice string if the number is already registered, None if an error happens
"""
if get_volunteer_details(phone_number) is None:
db = mongo_client.COVIDChatbot_HandoverNumbers
handover_request_details = {
"full_name": full_name,
"phone_number": "{}".format("+" + phone_number if not phone_number.startswith("+") else phone_number),
"languages": languages,
"num_users_answered": 0
}
collection = db.COVIDChatbot_HandoverNumbers.insert_one(handover_request_details)
if str(collection.inserted_id) != "": # if the request is successfully added to the database
return "Volunteer id: {}".format(str(collection.inserted_id))
return None
else:
return "This number is already registered to the list of volunteers!"
def add_handover_request(user_phone_number, language):
"""
add user's handover request to the waiting list
:param user_phone_number: user phone number
:param language: language of user
    :return: id of the handover request (newly created or already pending) as a string, None if an error happens
"""
handover_request = get_handover_request(user_phone_number) # check to see if any handover request from the user is still in the stack
if handover_request is None:
db = mongo_client.COVIDChatbot_HandoverRequests
handover_request_details = {
"user_number": "{}".format("+" + user_phone_number if not user_phone_number.startswith("+") else user_phone_number),
"language": language,
"volunteer_number": None,
"status": "WAITING"
}
collection = db.COVIDChatbot_HandoverRequests.insert_one(handover_request_details)
if str(collection.inserted_id) != "": # if the request is successfully added to the database
return str(collection.inserted_id)
else:
return str(handover_request["_id"])
def accept_handover_request(user_phone_number, handovered_phone_number):
"""
    assign a volunteer to the user's handover request and mark it as open
:param user_phone_number: user phone number
:param handovered_phone_number: phone number of person who accepted to answer user's queries
:return: True if the operation is successful, False if an error happens
"""
db = mongo_client.COVIDChatbot_HandoverRequests
query_result = db.COVIDChatbot_HandoverRequests.update_one({"user_number": "{}".format("+" + user_phone_number if not user_phone_number.startswith("+") else user_phone_number)},
{
"$set": {
"volunteer_number": "{}".format("+" + handovered_phone_number if not handovered_phone_number.startswith("+") else handovered_phone_number),
"status": "OPEN"
}
},
upsert=False)
if query_result.modified_count > 0:
return True
return False
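# Hedged sketch of the handover lifecycle implied by the functions above (not part of
# the original module): a volunteer registers, a user's request starts as WAITING, a
# volunteer for the matching language accepts it (status becomes OPEN), and
# close_handover_request below ends it. All numbers and the language are placeholders.
def _example_handover_flow():
    add_handover_volunteer("Jane Doe", "15550001111", ["english"])
    request_id = add_handover_request("15552223333", "english")
    volunteers = get_handover_volunteers_by_language("english")
    if volunteers:
        accept_handover_request("15552223333", volunteers[0]["phone_number"])
    return request_id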
def close_handover_request(user_phone_number):
"""
close user's handover request
:param user_phone_number: user phone number
    :return: True if the operation is successful, False if an error happens
# sdk/python/pulumi_linode/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
__all__ = [
'FirewallDeviceArgs',
'FirewallInboundArgs',
'FirewallOutboundArgs',
'InstanceAlertsArgs',
'InstanceBackupsArgs',
'InstanceBackupsScheduleArgs',
'InstanceConfigArgs',
'InstanceConfigDevicesArgs',
'InstanceConfigDevicesSdaArgs',
'InstanceConfigDevicesSdbArgs',
'InstanceConfigDevicesSdcArgs',
'InstanceConfigDevicesSddArgs',
'InstanceConfigDevicesSdeArgs',
'InstanceConfigDevicesSdfArgs',
'InstanceConfigDevicesSdgArgs',
'InstanceConfigDevicesSdhArgs',
'InstanceConfigHelpersArgs',
'InstanceDiskArgs',
'InstanceSpecsArgs',
'LkeClusterPoolArgs',
'LkeClusterPoolNodeArgs',
'NodeBalancerConfigNodeStatusArgs',
'NodeBalancerTransferArgs',
'ObjectStorageBucketCertArgs',
'StackScriptUserDefinedFieldArgs',
'GetStackScriptUserDefinedFieldArgs',
]
@pulumi.input_type
class FirewallDeviceArgs:
def __init__(__self__, *,
entity_id: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[int]] = None,
label: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] entity_id: The ID of the underlying entity this device references (i.e. the Linode's ID).
:param pulumi.Input[int] id: The ID of the Firewall Device.
:param pulumi.Input[str] label: This Firewall's unique label.
:param pulumi.Input[str] type: The type of Firewall Device.
"""
if entity_id is not None:
pulumi.set(__self__, "entity_id", entity_id)
if id is not None:
pulumi.set(__self__, "id", id)
if label is not None:
pulumi.set(__self__, "label", label)
if type is not None:
pulumi.set(__self__, "type", type)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="entityId")
def entity_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the underlying entity this device references (i.e. the Linode's ID).
"""
return pulumi.get(self, "entity_id")
@entity_id.setter
def entity_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "entity_id", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the Firewall Device.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def label(self) -> Optional[pulumi.Input[str]]:
"""
This Firewall's unique label.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "label", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of Firewall Device.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class FirewallInboundArgs:
def __init__(__self__, *,
addresses: pulumi.Input[Sequence[pulumi.Input[str]]],
ports: pulumi.Input[Sequence[pulumi.Input[str]]],
protocol: pulumi.Input[str]):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] addresses: A list of IP addresses, CIDR blocks, or `0.0.0.0/0` (to allow all) this rule applies to.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ports: A list of ports and/or port ranges (i.e. "443" or "80-90").
:param pulumi.Input[str] protocol: The network protocol this rule controls.
"""
pulumi.set(__self__, "addresses", addresses)
pulumi.set(__self__, "ports", ports)
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter
def addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of IP addresses, CIDR blocks, or `0.0.0.0/0` (to allow all) this rule applies to.
"""
return pulumi.get(self, "addresses")
@addresses.setter
def addresses(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "addresses", value)
@property
@pulumi.getter
def ports(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of ports and/or port ranges (i.e. "443" or "80-90").
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
The network protocol this rule controls.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class FirewallOutboundArgs:
def __init__(__self__, *,
addresses: pulumi.Input[Sequence[pulumi.Input[str]]],
ports: pulumi.Input[Sequence[pulumi.Input[str]]],
protocol: pulumi.Input[str]):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] addresses: A list of IP addresses, CIDR blocks, or `0.0.0.0/0` (to allow all) this rule applies to.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ports: A list of ports and/or port ranges (i.e. "443" or "80-90").
:param pulumi.Input[str] protocol: The network protocol this rule controls.
"""
pulumi.set(__self__, "addresses", addresses)
pulumi.set(__self__, "ports", ports)
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter
def addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of IP addresses, CIDR blocks, or `0.0.0.0/0` (to allow all) this rule applies to.
"""
return pulumi.get(self, "addresses")
@addresses.setter
def addresses(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "addresses", value)
@property
@pulumi.getter
def ports(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of ports and/or port ranges (i.e. "443" or "80-90").
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
The network protocol this rule controls.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class InstanceAlertsArgs:
def __init__(__self__, *,
cpu: Optional[pulumi.Input[int]] = None,
io: Optional[pulumi.Input[int]] = None,
network_in: Optional[pulumi.Input[int]] = None,
network_out: Optional[pulumi.Input[int]] = None,
transfer_quota: Optional[pulumi.Input[int]] = None):
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if io is not None:
pulumi.set(__self__, "io", io)
if network_in is not None:
pulumi.set(__self__, "network_in", network_in)
if network_out is not None:
pulumi.set(__self__, "network_out", network_out)
if transfer_quota is not None:
pulumi.set(__self__, "transfer_quota", transfer_quota)
@property
@pulumi.getter
def cpu(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "cpu")
@cpu.setter
def cpu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu", value)
@property
@pulumi.getter
def io(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "io")
@io.setter
def io(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "io", value)
@property
@pulumi.getter(name="networkIn")
def network_in(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "network_in")
@network_in.setter
def network_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "network_in", value)
@property
@pulumi.getter(name="networkOut")
def network_out(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "network_out")
@network_out.setter
def network_out(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "network_out", value)
@property
@pulumi.getter(name="transferQuota")
def transfer_quota(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "transfer_quota")
@transfer_quota.setter
def transfer_quota(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "transfer_quota", value)
@pulumi.input_type
class InstanceBackupsArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
schedule: Optional[pulumi.Input['InstanceBackupsScheduleArgs']] = None):
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def schedule(self) -> Optional[pulumi.Input['InstanceBackupsScheduleArgs']]:
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: Optional[pulumi.Input['InstanceBackupsScheduleArgs']]):
pulumi.set(self, "schedule", value)
@pulumi.input_type
class InstanceBackupsScheduleArgs:
def __init__(__self__, *,
day: Optional[pulumi.Input[str]] = None,
window: Optional[pulumi.Input[str]] = None):
if day is not None:
pulumi.set(__self__, "day", day)
if window is not None:
pulumi.set(__self__, "window", window)
@property
@pulumi.getter
def day(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "day")
@day.setter
def day(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "day", value)
@property
@pulumi.getter
def window(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "window")
@window.setter
def window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window", value)
@pulumi.input_type
class InstanceConfigArgs:
def __init__(__self__, *,
label: pulumi.Input[str],
comments: Optional[pulumi.Input[str]] = None,
devices: Optional[pulumi.Input['InstanceConfigDevicesArgs']] = None,
helpers: Optional[pulumi.Input['InstanceConfigHelpersArgs']] = None,
kernel: Optional[pulumi.Input[str]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
root_device: Optional[pulumi.Input[str]] = None,
run_level: Optional[pulumi.Input[str]] = None,
virt_mode: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] label: The Config's label for display purposes. Also used by `boot_config_label`.
:param pulumi.Input[str] comments: - Arbitrary user comments about this `config`.
:param pulumi.Input['InstanceConfigDevicesArgs'] devices: A list of `disk` or `volume` attachments for this `config`. If the `boot_config_label` omits a `devices` block, the Linode will not be booted.
:param pulumi.Input['InstanceConfigHelpersArgs'] helpers: Helpers enabled when booting to this Linode Config.
:param pulumi.Input[str] kernel: - A Kernel ID to boot a Linode with. Default is based on image choice. Examples are `linode/latest-64bit`, `linode/grub2`, `linode/direct-disk`, etc. See all kernels [here](https://api.linode.com/v4/linode/kernels). Note that this is a paginated API endpoint ([docs](https://developers.linode.com/api/v4/linode-kernels)).
:param pulumi.Input[int] memory_limit: - Defaults to the total RAM of the Linode
:param pulumi.Input[str] root_device: - The root device to boot. The corresponding disk must be attached to a `device` slot. Example: `"/dev/sda"`
:param pulumi.Input[str] run_level: - Defines the state of your Linode after booting. Defaults to `"default"`.
:param pulumi.Input[str] virt_mode: - Controls the virtualization mode. Defaults to `"paravirt"`.
"""
pulumi.set(__self__, "label", label)
if comments is not None:
pulumi.set(__self__, "comments", comments)
if devices is not None:
pulumi.set(__self__, "devices", devices)
if helpers is not None:
pulumi.set(__self__, "helpers", helpers)
if kernel is not None:
pulumi.set(__self__, "kernel", kernel)
if memory_limit is not None:
pulumi.set(__self__, "memory_limit", memory_limit)
if root_device is not None:
pulumi.set(__self__, "root_device", root_device)
if run_level is not None:
pulumi.set(__self__, "run_level", run_level)
if virt_mode is not None:
pulumi.set(__self__, "virt_mode", virt_mode)
@property
@pulumi.getter
def label(self) -> pulumi.Input[str]:
"""
The Config's label for display purposes. Also used by `boot_config_label`.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: pulumi.Input[str]):
pulumi.set(self, "label", value)
@property
@pulumi.getter
def comments(self) -> Optional[pulumi.Input[str]]:
"""
- Arbitrary user comments about this `config`.
"""
return pulumi.get(self, "comments")
@comments.setter
def comments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comments", value)
@property
@pulumi.getter
def devices(self) -> Optional[pulumi.Input['InstanceConfigDevicesArgs']]:
"""
A list of `disk` or `volume` attachments for this `config`. If the `boot_config_label` omits a `devices` block, the Linode will not be booted.
"""
return pulumi.get(self, "devices")
@devices.setter
def devices(self, value: Optional[pulumi.Input['InstanceConfigDevicesArgs']]):
pulumi.set(self, "devices", value)
@property
@pulumi.getter
def helpers(self) -> Optional[pulumi.Input['InstanceConfigHelpersArgs']]:
"""
Helpers enabled when booting to this Linode Config.
"""
return pulumi.get(self, "helpers")
@helpers.setter
def helpers(self, value: Optional[pulumi.Input['InstanceConfigHelpersArgs']]):
pulumi.set(self, "helpers", value)
@property
@pulumi.getter
def kernel(self) -> Optional[pulumi.Input[str]]:
"""
        - A Kernel ID to boot a Linode with. Default is based on image choice. Examples are `linode/latest-64bit`, `linode/grub2`, `linode/direct-disk`, etc. See all kernels [here](https://api.linode.com/v4/linode/kernels). Note that this is a paginated API endpoint ([docs](https://developers.linode.com/api/v4/linode-kernels)).
        """
        return pulumi.get(self, "kernel")
view_layer, "select")
cls.isolated = False
elif modifiers == {"ctrl", "shift"}:
isolate_rto(cls, self, view_layer, "select", children=True)
else:
# toggle selectable
# reset select history
del rto_history["select"][view_layer]
# toggle selectability of collection
laycol_ptr.collection.hide_select = not laycol_ptr.collection.hide_select
cls.isolated = False
# reset select all history
if view_layer in rto_history["select_all"]:
del rto_history["select_all"][view_layer]
return {'FINISHED'}
class CMUnRestrictSelectAllOperator(Operator):
bl_label = "[SS Global] Disable Selection"
bl_description = (
" * LMB - Enable all/Restore.\n"
" * Shift+LMB - Invert.\n"
" * Ctrl+LMB - Copy/Paste RTOs.\n"
" * Ctrl+Alt+LMB - Swap RTOs.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.un_restrict_select_all_collections"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
global rto_history
view_layer = context.view_layer.name
modifiers = get_modifiers(event)
if not view_layer in rto_history["select_all"]:
rto_history["select_all"][view_layer] = []
if modifiers == {"alt"}:
# clear all states
del rto_history["select_all"][view_layer]
clear_copy("select")
clear_swap("select")
elif modifiers == {"ctrl"}:
copy_rtos(view_layer, "select")
elif modifiers == {"ctrl", "alt"}:
swap_rtos(view_layer, "select")
elif modifiers == {"shift"}:
invert_rtos(view_layer, "select")
else:
activate_all_rtos(view_layer, "select")
return {'FINISHED'}
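# Illustrative sketch (not part of the add-on): every operator in this file branches on
# get_modifiers(event), which is assumed to return the set of held modifier keys (e.g.
# set(), {"ctrl"}, {"ctrl", "shift"}, {"alt"}), and on rto_history, whose per-view-layer
# shapes are inferred from how the operators above and below reset and read them.
def _example_history_shapes(view_layer_name="ViewLayer"):
    per_collection_entry = {"target": "", "history": []}  # e.g. rto_history["select"][view_layer_name]
    global_entry = []                                      # e.g. rto_history["select_all"][view_layer_name]
    return {view_layer_name: per_collection_entry}, {view_layer_name: global_entry}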
class CMHideOperator(Operator):
bl_label = "[VV] Hide in Viewport"
bl_description = (
" * Shift+LMB - Isolate/Restore.\n"
" * Shift+Ctrl+LMB - Isolate nested/Restore.\n"
" * Ctrl+LMB - Toggle nested.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.hide_collection"
bl_options = {'REGISTER', 'UNDO'}
name: StringProperty()
# static class var
isolated = False
def invoke(self, context, event):
global rto_history
cls = CMHideOperator
modifiers = get_modifiers(event)
view_layer = context.view_layer.name
laycol_ptr = layer_collections[self.name]["ptr"]
if not view_layer in rto_history["hide"]:
rto_history["hide"][view_layer] = {"target": "", "history": []}
if modifiers == {"alt"}:
del rto_history["hide"][view_layer]
cls.isolated = False
elif modifiers == {"shift"}:
isolate_rto(cls, self, view_layer, "hide")
elif modifiers == {"ctrl"}:
toggle_children(self, view_layer, "hide")
cls.isolated = False
elif modifiers == {"ctrl", "shift"}:
isolate_rto(cls, self, view_layer, "hide", children=True)
else:
# toggle visible
# reset hide history
del rto_history["hide"][view_layer]
# toggle view of collection
laycol_ptr.hide_viewport = not laycol_ptr.hide_viewport
cls.isolated = False
# reset hide all history
if view_layer in rto_history["hide_all"]:
del rto_history["hide_all"][view_layer]
return {'FINISHED'}
class CMUnHideAllOperator(Operator):
bl_label = "[VV Global] Hide in Viewport"
bl_description = (
" * LMB - Enable all/Restore.\n"
" * Shift+LMB - Invert.\n"
" * Ctrl+LMB - Copy/Paste RTOs.\n"
" * Ctrl+Alt+LMB - Swap RTOs.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.un_hide_all_collections"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
global rto_history
view_layer = context.view_layer.name
modifiers = get_modifiers(event)
if not view_layer in rto_history["hide_all"]:
rto_history["hide_all"][view_layer] = []
if modifiers == {"alt"}:
# clear all states
del rto_history["hide_all"][view_layer]
clear_copy("hide")
clear_swap("hide")
elif modifiers == {"ctrl"}:
copy_rtos(view_layer, "hide")
elif modifiers == {"ctrl", "alt"}:
swap_rtos(view_layer, "hide")
elif modifiers == {"shift"}:
invert_rtos(view_layer, "hide")
else:
activate_all_rtos(view_layer, "hide")
return {'FINISHED'}
class CMDisableViewportOperator(Operator):
bl_label = "[DV] Disable in Viewports"
bl_description = (
" * Shift+LMB - Isolate/Restore.\n"
" * Shift+Ctrl+LMB - Isolate nested/Restore.\n"
" * Ctrl+LMB - Toggle nested.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.disable_viewport_collection"
bl_options = {'REGISTER', 'UNDO'}
name: StringProperty()
# static class var
isolated = False
def invoke(self, context, event):
global rto_history
cls = CMDisableViewportOperator
modifiers = get_modifiers(event)
view_layer = context.view_layer.name
laycol_ptr = layer_collections[self.name]["ptr"]
if not view_layer in rto_history["disable"]:
rto_history["disable"][view_layer] = {"target": "", "history": []}
if modifiers == {"alt"}:
del rto_history["disable"][view_layer]
cls.isolated = False
elif modifiers == {"shift"}:
isolate_rto(cls, self, view_layer, "disable")
elif modifiers == {"ctrl"}:
toggle_children(self, view_layer, "disable")
cls.isolated = False
elif modifiers == {"ctrl", "shift"}:
isolate_rto(cls, self, view_layer, "disable", children=True)
else:
# toggle disable
# reset disable history
del rto_history["disable"][view_layer]
# toggle disable of collection in viewport
laycol_ptr.collection.hide_viewport = not laycol_ptr.collection.hide_viewport
cls.isolated = False
# reset disable all history
if view_layer in rto_history["disable_all"]:
del rto_history["disable_all"][view_layer]
return {'FINISHED'}
class CMUnDisableViewportAllOperator(Operator):
bl_label = "[DV Global] Disable in Viewports"
bl_description = (
" * LMB - Enable all/Restore.\n"
" * Shift+LMB - Invert.\n"
" * Ctrl+LMB - Copy/Paste RTOs.\n"
" * Ctrl+Alt+LMB - Swap RTOs.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.un_disable_viewport_all_collections"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
global rto_history
view_layer = context.view_layer.name
modifiers = get_modifiers(event)
if not view_layer in rto_history["disable_all"]:
rto_history["disable_all"][view_layer] = []
if modifiers == {"alt"}:
# clear all states
del rto_history["disable_all"][view_layer]
clear_copy("disable")
clear_swap("disable")
elif modifiers == {"ctrl"}:
copy_rtos(view_layer, "disable")
elif modifiers == {"ctrl", "alt"}:
swap_rtos(view_layer, "disable")
elif modifiers == {"shift"}:
invert_rtos(view_layer, "disable")
else:
activate_all_rtos(view_layer, "disable")
return {'FINISHED'}
class CMDisableRenderOperator(Operator):
bl_label = "[RR] Disable in Renders"
bl_description = (
" * Shift+LMB - Isolate/Restore.\n"
" * Shift+Ctrl+LMB - Isolate nested/Restore.\n"
" * Ctrl+LMB - Toggle nested.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.disable_render_collection"
bl_options = {'REGISTER', 'UNDO'}
name: StringProperty()
# static class var
isolated = False
def invoke(self, context, event):
global rto_history
cls = CMDisableRenderOperator
modifiers = get_modifiers(event)
view_layer = context.view_layer.name
laycol_ptr = layer_collections[self.name]["ptr"]
if not view_layer in rto_history["render"]:
rto_history["render"][view_layer] = {"target": "", "history": []}
if modifiers == {"alt"}:
del rto_history["render"][view_layer]
cls.isolated = False
elif modifiers == {"shift"}:
isolate_rto(cls, self, view_layer, "render")
elif modifiers == {"ctrl"}:
toggle_children(self, view_layer, "render")
cls.isolated = False
elif modifiers == {"ctrl", "shift"}:
isolate_rto(cls, self, view_layer, "render", children=True)
else:
# toggle renderable
# reset render history
del rto_history["render"][view_layer]
# toggle renderability of collection
laycol_ptr.collection.hide_render = not laycol_ptr.collection.hide_render
cls.isolated = False
# reset render all history
if view_layer in rto_history["render_all"]:
del rto_history["render_all"][view_layer]
return {'FINISHED'}
class CMUnDisableRenderAllOperator(Operator):
bl_label = "[RR Global] Disable in Renders"
bl_description = (
" * LMB - Enable all/Restore.\n"
" * Shift+LMB - Invert.\n"
" * Ctrl+LMB - Copy/Paste RTOs.\n"
" * Ctrl+Alt+LMB - Swap RTOs.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.un_disable_render_all_collections"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
global rto_history
view_layer = context.view_layer.name
modifiers = get_modifiers(event)
if not view_layer in rto_history["render_all"]:
rto_history["render_all"][view_layer] = []
if modifiers == {"alt"}:
# clear all states
del rto_history["render_all"][view_layer]
clear_copy("render")
clear_swap("render")
elif modifiers == {"ctrl"}:
copy_rtos(view_layer, "render")
elif modifiers == {"ctrl", "alt"}:
swap_rtos(view_layer, "render")
elif modifiers == {"shift"}:
invert_rtos(view_layer, "render")
else:
activate_all_rtos(view_layer, "render")
return {'FINISHED'}
class CMHoldoutOperator(Operator):
bl_label = "[HH] Holdout"
bl_description = (
" * Shift+LMB - Isolate/Restore.\n"
" * Shift+Ctrl+LMB - Isolate nested/Restore.\n"
" * Ctrl+LMB - Toggle nested.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.holdout_collection"
bl_options = {'REGISTER', 'UNDO'}
name: StringProperty()
# static class var
isolated = False
def invoke(self, context, event):
global rto_history
cls = CMHoldoutOperator
modifiers = get_modifiers(event)
view_layer = context.view_layer.name
laycol_ptr = layer_collections[self.name]["ptr"]
if not view_layer in rto_history["holdout"]:
rto_history["holdout"][view_layer] = {"target": "", "history": []}
if modifiers == {"alt"}:
del rto_history["holdout"][view_layer]
cls.isolated = False
elif modifiers == {"shift"}:
isolate_rto(cls, self, view_layer, "holdout")
elif modifiers == {"ctrl"}:
toggle_children(self, view_layer, "holdout")
cls.isolated = False
elif modifiers == {"ctrl", "shift"}:
isolate_rto(cls, self, view_layer, "holdout", children=True)
else:
# toggle holdout
# reset holdout history
del rto_history["holdout"][view_layer]
# toggle holdout of collection in viewport
laycol_ptr.holdout = not laycol_ptr.holdout
cls.isolated = False
# reset holdout all history
if view_layer in rto_history["holdout_all"]:
del rto_history["holdout_all"][view_layer]
return {'FINISHED'}
class CMUnHoldoutAllOperator(Operator):
bl_label = "[HH Global] Holdout"
bl_description = (
" * LMB - Enable all/Restore.\n"
" * Shift+LMB - Invert.\n"
" * Ctrl+LMB - Copy/Paste RTOs.\n"
" * Ctrl+Alt+LMB - Swap RTOs.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.un_holdout_all_collections"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
global rto_history
view_layer = context.view_layer.name
modifiers = get_modifiers(event)
if not view_layer in rto_history["holdout_all"]:
rto_history["holdout_all"][view_layer] = []
if modifiers == {"alt"}:
# clear all states
del rto_history["holdout_all"][view_layer]
clear_copy("holdout")
clear_swap("holdout")
elif modifiers == {"ctrl"}:
copy_rtos(view_layer, "holdout")
elif modifiers == {"ctrl", "alt"}:
swap_rtos(view_layer, "holdout")
elif modifiers == {"shift"}:
invert_rtos(view_layer, "holdout")
else:
activate_all_rtos(view_layer, "holdout")
return {'FINISHED'}
class CMIndirectOnlyOperator(Operator):
bl_label = "[IO] Indirect Only"
bl_description = (
" * Shift+LMB - Isolate/Restore.\n"
" * Shift+Ctrl+LMB - Isolate nested/Restore.\n"
" * Ctrl+LMB - Toggle nested.\n"
" * Alt+LMB - Discard history"
)
bl_idname = "view3d.indirect_only_collection"
bl_options = {'REGISTER', 'UNDO'}
name: StringProperty()
# static class var
isolated = False
def invoke(self, context, event):
global rto_history
cls = CMIndirectOnlyOperator
modifiers = get_modifiers(event)
view_layer = context.view_layer.name
laycol_ptr = layer_collections[self.name]["ptr"]
if not view_layer in rto_history["indirect"]:
rto_history["indirect"][view_layer] = {"target": "", "history": []}
if modifiers == {"alt"}:
del rto_history["indirect"][view_layer]
cls.isolated = False
elif modifiers == {"shift"}:
isolate_rto(cls, self, view_layer, "indirect")
elif modifiers == {"ctrl"}:
toggle_children(self, view_layer, "indirect")
cls.isolated = False
        elif modifiers == {"ctrl", "shift"}:
            isolate_rto(cls, self, view_layer, "indirect", children=True)
        else:
            # toggle indirect only
            # reset indirect history
            del rto_history["indirect"][view_layer]
            # toggle indirect only of collection
            laycol_ptr.indirect_only = not laycol_ptr.indirect_only
            cls.isolated = False
        # reset indirect all history
        if view_layer in rto_history["indirect_all"]:
            del rto_history["indirect_all"][view_layer]
        return {'FINISHED'}
z3.is_true(m.eval(encode_equality(t,w))):
return w
# print "model: {}".format(m.sexpr())
# print "term: {}".format(t)
res = ivy_logic.Constant(ivy_logic.Symbol(s.defines()[0],s))
print "warning: model doesn't give a value for enumerated term {}. returning {}.".format(t,res)
return res
# assert False # model doesn't give a value for enumerated term
return constant_from_z3(m.eval(term_to_z3(t)))
def clauses_imply(clauses1, clauses2):
"""True if clauses1 imply clauses2.
"""
s = z3.Solver()
z1 = clauses_to_z3(clauses1)
# print "z1 = {}".format(z1)
s.add(z1)
z2 = not_clauses_to_z3(clauses2)
# print "z2 = {}".format(z2)
s.add(z2)
return s.check() == z3.unsat
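# Hedged illustration (not part of the original module): clauses_imply above decides
# validity of clauses1 -> clauses2 by asking Z3 whether clauses1 /\ not(clauses2) is
# unsatisfiable. The same check with plain Z3 booleans, independent of the Clauses
# machinery, looks like this:
def _example_implication_check():
    p, q = z3.Bools('p q')
    s = z3.Solver()
    s.add(z3.And(p, z3.Implies(p, q)))  # plays the role of clauses1
    s.add(z3.Not(q))                    # negation of clauses2
    return s.check() == z3.unsat        # True: (p /\ (p -> q)) implies q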
def not_clauses_to_z3(clauses):
# Separate the definition of skolems
sdefs,defs = [],[]
for dfn in clauses.defs:
(sdefs if dfn.defines().is_skolem() else defs).append(dfn)
dcls = Clauses([],sdefs)
cls = Clauses(clauses.fmlas,defs)
# print "not_clauses_to_z3: dcls: {} cls: {}".format(dcls,cls)
    return z3.And(clauses_to_z3(dcls),z3.Not(clauses_to_z3(cls)))
def clauses_sat(clauses1):
"""True if clauses1 imply clauses2.
"""
s = z3.Solver()
s.add(clauses_to_z3(clauses1))
return s.check() != z3.unsat
def remove_duplicates_clauses(clauses):
# tricky: store all z3 fmlas in list so not GC'd until all id's computed!
z3fs = [(c,formula_to_z3(c)) for c in clauses.fmlas]
return Clauses(list(ivy_utils.unique2((x,get_id(y)) for x,y in z3fs)),clauses.defs)
def clauses_case(clauses1):
""" Drop literals in a clause set while maintaining satisfiability.
This only works for quantifier-free clauses. """
s = z3.Solver()
s.add(clauses_to_z3(clauses1))
if s.check() == z3.unsat:
return [[]]
m = get_model(s)
# print "clauses_case: after SAT check"
## print "clauses1: {}".format(clauses1)
clauses = Clauses([clause_model_simp(m,c) for c in clauses1.clauses])
clauses = remove_duplicates_clauses(clauses)
while True:
# print "clause_case: starting iteration"
# print "clauses: {}".format(clauses)
num_old_clauses = len(clauses.clauses)
r = ur.UnitRes(clauses.clauses)
with r.context():
r.propagate()
# print "clause_case: after propagation"
new_clauses = Clauses([[l] for l in r.unit_queue] + r.clauses)
# print "new_clauses: {}".format(new_clauses)
clauses = Clauses([clause_model_simp(m,c) for c in new_clauses.clauses])
# print "clause_case: after model-based simplification"
clauses = remove_duplicates_clauses(clauses)
# print "clause_case: after duplicate removal"
if len(clauses.clauses) <= num_old_clauses:
return clauses
def clause_model_simp(m,c):
""" Simplify a clause by dropping literals while maintaining its truth in a model. """
res = []
for l in c:
if not is_ground_lit(l):
res.append(l)
continue
if isinstance(l.atom,ivy_logic.And):
print "clause_model_simp: {}".format(c)
v = m.eval(literal_to_z3(l))
if z3.is_true(v):
return [l]
if not z3.is_false(v):
res.append(l)
return res
def get_model_clauses(clauses1):
s = z3.Solver()
z3c = clauses_to_z3(clauses1)
s.add(z3c)
if s.check() == z3.unsat:
return None
m = get_model(s)
return HerbrandModel(s,m)
def sort_size_constraint(sort,size):
if isinstance(sort,ivy_logic.UninterpretedSort):
syms = [ivy_logic.Symbol('__'+sort.name+'$'+str(i),sort) for i in range(size)]
v = ivy_logic.Variable('X'+sort.name,sort)
res = ivy_logic.Or(*[ivy_logic.Equals(v,s) for s in syms])
# print "sort_size_constraint : {}".format(res)
return res
return ivy_logic.And()
def relation_size_constraint(relation, size):
assert type(relation) is lg.Const
assert type(relation.sort) is lg.FunctionSort
consts = [
[
lg.Const('__${}${}${}'.format(relation.name, i, j), s)
for j, s in enumerate(relation.sort.domain)
]
for i in range(size)
]
vs = [
lg.Var('X${}${}'.format(relation.name, j), s)
for j, s in enumerate(relation.sort.domain)
]
result = lg.Or(lg.Not(relation(*vs)), *(
lg.And(*(
lg.Eq(c, v) for c, v in zip(cs,vs)
))
for cs in consts
))
print "relation_size_constraint: {}".format(result)
return result
def size_constraint(x, size):
if type(x) is lg.UninterpretedSort:
return sort_size_constraint(x, size)
elif type(x) is lg.Const and type(x.sort) is lg.FunctionSort:
return relation_size_constraint(x, size)
else:
return lg.And()
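# Illustrative note (not part of the original module): for an uninterpreted sort named
# "node" and size 2, sort_size_constraint above produces (up to variable naming) the
# formula
#
#     Xnode = __node$0  |  Xnode = __node$1
#
# i.e. every element of the sort must equal one of `size` fresh constants, which is what
# bounds the universe during model shrinking. relation_size_constraint plays the same
# role for relations by bounding the number of tuples on which they can hold.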
def model_if_none(clauses1,implied,model):
h = model
if h == None:
s = z3.Solver()
z3c = clauses_to_z3(clauses1)
s.add(z3c)
if implied != None:
s.add(not_clauses_to_z3(implied))
sort_size = 1
while True:
s.push()
for sort in ivy_logic.sorts():
s.add(formula_to_z3(sort_size_constraint(sort,sort_size)))
if s.check() != z3.unsat:
m = get_model(s)
print "model = {}, size = {}".format(m,sort_size)
## print "clauses1 = {}".format(clauses1)
## print "z3c = {}".format(str(z3c))
h = HerbrandModel(s,m)
s.pop()
return h
sort_size += 1
s.pop()
return h
def get_small_model(clauses, sorts_to_minimize, relations_to_minimize):
"""
Return a HerbrandModel with a "small" model of clauses.
sorts_to_minimize is a list of sorts, and relations_to_minimize is
a list of relations,
The model minimization occurs in 2 ways:
First, minimize universe size lexicographically according to the order of
sorts_to_minimize.
Second, minimize the number of positive entries in the relations
according to the order of relations_to_minimize.
"""
s = z3.Solver()
s.add(clauses_to_z3(clauses))
if s.check() == z3.unsat:
return None
print "shrinking model {"
for x in chain(sorts_to_minimize, relations_to_minimize):
for n in itertools.count(1):
s.push()
s.add(formula_to_z3(size_constraint(x, n)))
if s.check() == z3.sat:
break
else:
s.pop()
print "} shrinking model"
m = get_model(s)
h = HerbrandModel(s,m)
return h
def model_universe_facts(h,sort,upclose):
# get universe elements
elems = h.sort_universe(sort)
# constraint defining universe
uc = []
if not upclose:
uc = [[ivy_logic._eq_lit(ivy_logic.Variable('X',c.sort),c) for c in elems]]
# universe elements are distinct
dc = [[ivy_logic._neq_lit(c1,c2)]
for (c1,c2) in ivy_utils.distinct_unordered_pairs(elems)]
return uc+dc
def model_facts(h,ignore,clauses1,upclose=False):
# define the universe for each sort:
uc = [fact for s in h.sorts() for fact in model_universe_facts(h,s,upclose)]
# values of constants in formula
temp = [[(ivy_logic.Constant(c),
get_model_constant(h.model,ivy_logic.Constant(c)))]
for c in used_constants_clauses(clauses1)
if not ignore(c) and c not in ivy_logic.sig.constructors]
# print "temp = {}".format(temp)
vc = [[ivy_logic._eq_lit(ivy_logic.Constant(c),
get_model_constant(h.model,ivy_logic.Constant(c)))]
for c in used_constants_clauses(clauses1)
if not ignore(c) and c not in ivy_logic.sig.constructors]
# print "model_facts vc = {}".format(vc)
# values of relations in formula
# print "used_relations_clauses = {}".format(used_relations_clauses(clauses1))
vr = [[l]
for (r,n) in used_relations_clauses(clauses1).iteritems()
if not ignore(r)
for l in relation_model_to_clauses(h,r,n)]
# values of functions in formula
fns = set(f for (f,arity) in functions_clauses(clauses1) if not ignore(f) and arity >= 1)
vf = [[l] for f in fns for l in function_model_to_clauses(h,f)]
res = uc + vc + vr + vf
return Clauses(res)
#def numeral_assign(h):
# return m = dict((c.rep,ivy_logic.Constant(c.rep.rename(lambda s:str(i))))
# for s in h.sorts() for i,c in enumerate(h.sorted_sort_universe(s)))
def numeral_assign(clauses,h):
num_by_sort = defaultdict(list)
numerals = [c for c in used_constants_clauses(clauses) if c.is_numeral()]
for num in numerals:
num_by_sort[num.sort].append(num)
# print "num_by_sort: {}".format(numerals)
foom = dict()
used = set()
# print "starting: foom = {}".format(foom)
for s in h.sorts():
# print "sort loop: sort = {}, foom = {}".format(s,foom)
for num in num_by_sort[s]:
# print "foom = {}".format(foom)
numv = h.eval_constant(num)
# print "eval: {}:{} = {}".format(num,num.sort,numv)
if numv in foom:
print "two numerals assigned same value!: {} = {}".format(num,foom[numv])
else:
# print "assigning {} to {}".format(num,numv)
foom[numv] = num
used.add(num)
i = 0
for c in h.sorted_sort_universe(s):
if c not in foom:
while True:
num = ivy_logic.Constant(c.rep.rename(lambda s:str(i)))
i = i + 1
if num not in used:
foom[c.rep] = num
break
return foom
def clauses_model_to_clauses(clauses1,ignore = None, implied = None,model = None, numerals=False):
""" Return a model of clauses1 or None. Model is represented by a
clause set that uniquely characterizes it. The function "ignore", if
provided, returns true for symbols that should be ignored in the
    model (typically skolems).
"""
## print "clauses_model_to_clauses clauses1 = {}".format(clauses1)
h = model_if_none(clauses1,implied,model)
ignore = ignore if ignore != None else lambda x: False
res = model_facts(h,ignore,clauses1)
# print "core after mode_facts: {} ".format(unsat_core(res,true_clauses()))
# if using numerals, replace the universe elements with them
if numerals:
m = numeral_assign(res,h)
# print "dict: {}".format([(str(x),str(y)) for x,y in m.iteritems()])
# else, existentially quantify the names of the universe elements
else:
m = dict((c.rep,ivy_logic.Constant(c.rep.prefix('__')))
for s in h.sorts() for c in h.sort_universe(s))
res = substitute_constants_clauses(res,m)
# print "core after rename: {} ".format(unsat_core(res,true_clauses()))
# print "clauses_model_to_clauses res = {}".format(res)
return res
def clauses_model_to_diagram(clauses1,ignore = None, implied = None,model = None,axioms=None,weaken=True):
""" Return a diagram of a model of clauses1 or None. The function "ignore", if
provided, returns true for symbols that should be ignored in the
diagram.
"""
print "clauses_model_to_diagram clauses1 = {}".format(clauses1)
if axioms == None:
        axioms = true_clauses()
h = model_if_none(and_clauses(clauses1,axioms),implied,model)
ignore = ignore if ignore != None else lambda x: False
res = model_facts(h,(lambda x: False),clauses1,upclose=True) # why not pass axioms?
print "clauses_model_to_diagram res = {}".format(res)
# find representative elements
# find representatives of universe elements
reps = dict()
for c in used_constants_clauses(clauses1):
# print "constant: {}".format(c)
mc = get_model_constant(h.model,ivy_logic.Constant(c))
# print "value: {}".format(mc)
if mc.rep not in reps or reps[mc.rep].rep.is_skolem() and not c.is_skolem():
reps[mc.rep] = ivy_logic.Constant(c)
for s in h.sorts():
for e in h.sort_universe(s):
if e.rep not in reps:
reps[e.rep] = e.rep.skolem()()
print "clauses_model_to_diagram reps = {}".format(reps)
# filter out clauses using universe elements without reps
# res = [cls for cls in res if all(c in reps for c in used_constants_clause(cls))]
# replace universe elements with their reps
print "clauses_model_to_diagram res = {}".format(res)
res = substitute_constants_clauses(res,reps)
# filter defined skolems
# this caused a bug in the leader example. the generated diagram did not satisfy clauses1
res.fmlas = [f for f in res.fmlas if not any((x.is_skolem() and x in clauses1.defidx) for x in used_symbols_ast(f))]
print "clauses_model_to_diagram res = {}".format(res)
uc = Clauses([[ivy_logic._eq_lit(ivy_logic.Variable('X',c.get_sort()),reps[c.rep])
for c in h.sort_universe(s)] for s in h.sorts()])
print "clauses_model_to_diagram uc = | |
# heat/tests/test_volume.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mox
from testtools import skipIf
from heat.common import exception
from heat.common import template_format
from heat.engine import scheduler
from heat.engine.resources import instance
from heat.engine.resources import volume as vol
from heat.engine import clients
from heat.engine import resource
from heat.openstack.common.importutils import try_import
from heat.tests.common import HeatTestCase
from heat.tests.v1_1 import fakes
from heat.tests import utils
from cinderclient.v1 import client as cinderclient
volume_backups = try_import('cinderclient.v1.volume_backups')
volume_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Volume Test",
"Parameters" : {},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "some data"
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "1",
"AvailabilityZone" : {"Fn::GetAtt": ["WikiDatabase",
"AvailabilityZone"]},
"Tags" : [{ "Key" : "Usage", "Value" : "Wiki Data Volume" }]
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdc"
}
}
}
}
'''
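# Hedged sketch (the real FakeVolume used throughout these tests is defined elsewhere in
# the original test module): the tests only rely on an object exposing an id, a status
# that moves from an initial to a final value when polled, and get()/delete() hooks that
# mox can stub. A minimal stand-in consistent with that usage might look like the class
# below; it is named differently so it cannot shadow the real fake.
class _FakeVolumeSketch(object):
    id = 'vol-123'
    def __init__(self, initial_status, final_status):
        self.status = initial_status
        self.final_status = final_status
    def get(self):
        # the real fake eventually reports the final status when polled
        self.status = self.final_status
    def delete(self):
        pass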
class VolumeTest(HeatTestCase):
def setUp(self):
super(VolumeTest, self).setUp()
self.fc = fakes.FakeClient()
self.cinder_fc = cinderclient.Client('username', 'password')
self.m.StubOutWithMock(clients.OpenStackClients, 'cinder')
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
self.m.StubOutWithMock(self.cinder_fc.volumes, 'create')
self.m.StubOutWithMock(self.cinder_fc.volumes, 'get')
self.m.StubOutWithMock(self.cinder_fc.volumes, 'delete')
self.m.StubOutWithMock(self.fc.volumes, 'create_server_volume')
self.m.StubOutWithMock(self.fc.volumes, 'delete_server_volume')
utils.setup_dummy_db()
def create_volume(self, t, stack, resource_name):
data = t['Resources'][resource_name]
data['Properties']['AvailabilityZone'] = 'nova'
rsrc = vol.Volume(resource_name, data, stack)
self.assertEqual(rsrc.validate(), None)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
return rsrc
def create_attachment(self, t, stack, resource_name):
rsrc = vol.VolumeAttachment(resource_name,
t['Resources'][resource_name],
stack)
self.assertEqual(rsrc.validate(), None)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
return rsrc
def _mock_create_volume(self, fv, stack_name):
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.create(
size=u'1', availability_zone='nova',
display_description=vol_name,
display_name=vol_name).AndReturn(fv)
def _stubout_delete_volume(self, fv):
self.m.StubOutWithMock(fv, 'delete')
fv.delete().AndReturn(True)
self.m.StubOutWithMock(fv, 'get')
fv.get().AndReturn(None)
fv.get().AndRaise(
clients.cinderclient.exceptions.NotFound('Not found'))
self.m.ReplayAll()
def _mock_create_server_volume_script(self, fva):
clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
self.fc.volumes.create_server_volume(
device=u'/dev/vdc', server_id=u'WikiDatabase',
volume_id=u'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
def test_volume(self):
fv = FakeVolume('creating', 'available')
stack_name = 'test_volume_stack'
# create script
self._mock_create_volume(fv, stack_name)
# delete script
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = self.create_volume(t, stack, 'DataVolume')
self.assertEqual(fv.status, 'available')
self.assertRaises(resource.UpdateReplace,
rsrc.handle_update, {}, {}, {})
fv.status = 'in-use'
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.destroy))
self._stubout_delete_volume(fv)
fv.status = 'available'
scheduler.TaskRunner(rsrc.destroy)()
# Test when volume already deleted
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
def test_volume_default_az(self):
fv = FakeVolume('creating', 'available')
stack_name = 'test_volume_stack'
# create script
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(vol.VolumeAttachment, 'handle_create')
self.m.StubOutWithMock(vol.VolumeAttachment, 'check_create_complete')
instance.Instance.handle_create().AndReturn(None)
instance.Instance.check_create_complete(None).AndReturn(True)
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.create(
size=u'1', availability_zone=None,
display_description=vol_name,
display_name=vol_name).AndReturn(fv)
vol.VolumeAttachment.handle_create().AndReturn(None)
vol.VolumeAttachment.check_create_complete(None).AndReturn(True)
# delete script
self.m.StubOutWithMock(instance.Instance, 'handle_delete')
self.m.StubOutWithMock(vol.VolumeAttachment, 'handle_delete')
instance.Instance.handle_delete().AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndRaise(
clients.cinderclient.exceptions.NotFound('Not found'))
vol.VolumeAttachment.handle_delete().AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['DataVolume']
self.assertEqual(rsrc.validate(), None)
scheduler.TaskRunner(stack.create)()
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
scheduler.TaskRunner(stack.delete)()
self.m.VerifyAll()
def test_volume_create_error(self):
fv = FakeVolume('creating', 'error')
stack_name = 'test_volume_create_error_stack'
self._mock_create_volume(fv, stack_name)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = vol.Volume('DataVolume',
t['Resources']['DataVolume'],
stack)
create = scheduler.TaskRunner(rsrc.create)
self.assertRaises(exception.ResourceFailure, create)
self.m.VerifyAll()
def test_volume_attachment_error(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'error')
stack_name = 'test_volume_attach_error_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual(fv.status, 'available')
rsrc = vol.VolumeAttachment('MountPoint',
t['Resources']['MountPoint'],
stack)
create = scheduler.TaskRunner(rsrc.create)
self.assertRaises(exception.ResourceFailure, create)
self.m.VerifyAll()
def test_volume_attachment(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'in-use')
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
fva = FakeVolume('in-use', 'available')
self.fc.volumes.delete_server_volume('WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual(fv.status, 'available')
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertRaises(resource.UpdateReplace,
rsrc.handle_update, {}, {}, {})
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detachment_err(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('in-use', 'available')
stack_name = 'test_volume_detach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
fva = FakeVolume('in-use', 'available')
self.m.StubOutWithMock(fva, 'get')
fva.get().MultipleTimes()
fva.status = "in-use"
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.BadRequest('Already detached'))
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('Not found'))
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('Not found'))
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndRaise(
clients.cinderclient.exceptions.NotFound('Not found'))
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual(fv.status, 'available')
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertRaises(resource.UpdateReplace,
rsrc.handle_update, {}, {}, {})
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_non_exist(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('in-use', 'available')
stack_name = 'test_volume_detach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
self.cinder_fc.volumes.get('vol-123').AndRaise(
clients.cinderclient.exceptions.NotFound('Not found'))
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
rsrc = self.create_attachment(t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_with_latency(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'in-use')
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
volume_detach_cycle = 'in-use', 'detaching', 'available'
fva = FakeLatencyVolume(life_cycle=volume_detach_cycle)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual(fv.status, 'available')
rsrc = self.create_attachment(t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_with_error(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'in-use')
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
fva = FakeVolume('in-use', 'error')
self.fc.volumes.delete_server_volume('WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual(fv.status, 'available')
rsrc = self.create_attachment(t, stack, 'MountPoint')
detach_task = scheduler.TaskRunner(rsrc.delete)
self.assertRaises(exception.ResourceFailure, detach_task)
self.m.VerifyAll()
def test_volume_delete(self):
stack_name = 'test_volume_stack'
fv = FakeVolume('creating', 'available')
fb = FakeBackup('creating', 'available')
self._mock_create_volume(fv, stack_name)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['DeletionPolicy'] = 'Delete'
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = self.create_volume(t, stack, 'DataVolume')
self.m.StubOutWithMock(rsrc, "handle_delete")
rsrc.handle_delete().AndReturn(None)
self.m.StubOutWithMock(rsrc, "check_delete_complete")
rsrc.check_delete_complete(mox.IgnoreArg()).AndReturn(True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
@skipIf(volume_backups is None, 'unable to import volume_backups')
def test_snapshot(self):
stack_name = 'test_volume_stack'
fv = FakeVolume('creating', 'available')
fb = FakeBackup('creating', 'available')
self._mock_create_volume(fv, stack_name)
# snapshot script
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create('vol-123').AndReturn(fb)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = self.create_volume(t, stack, 'DataVolume')
self._stubout_delete_volume(fv)
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
@skipIf(volume_backups is None, 'unable to import volume_backups')
def test_snapshot_error(self):
stack_name = 'test_volume_stack'
fv = FakeVolume('creating', 'available')
fb = FakeBackup('creating', 'error')
self._mock_create_volume(fv, stack_name)
# snapshot script
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create('vol-123').AndReturn(fb)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = self.create_volume(t, stack, 'DataVolume')
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.destroy))
self.m.VerifyAll()
@skipIf(volume_backups is None, 'unable to import volume_backups')
def test_snapshot_no_volume(self):
stack_name = 'test_volume_stack'
fv = FakeVolume('creating', 'error')
self._mock_create_volume(fv, stack_name)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = vol.Volume('DataVolume',
t['Resources']['DataVolume'],
stack)
create = scheduler.TaskRunner(rsrc.create)
self.assertRaises(exception.ResourceFailure, create)
self._stubout_delete_volume(fv)
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
@skipIf(volume_backups is None, 'unable to import volume_backups')
def test_create_from_snapshot(self):
stack_name = 'test_volume_stack'
fv = FakeVolumeWithStateTransition('restoring-backup', 'available')
fvbr = FakeBackupRestore('vol-123')
# create script
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.StubOutWithMock(fv, 'update')
vol_name = utils.PhysName(stack_name, 'DataVolume')
fv.update(
display_description=vol_name,
display_name=vol_name)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['SnapshotId'] = 'backup-123'
stack = utils.parse_stack(t, stack_name=stack_name)
self.create_volume(t, stack, 'DataVolume')
self.assertEqual(fv.status, 'available')
self.m.VerifyAll()
@skipIf(volume_backups is None, 'unable to import volume_backups')
def test_create_from_snapshot_error(self):
stack_name = 'test_volume_stack'
fv = FakeVolumeWithStateTransition('restoring-backup', 'error')
fvbr = FakeBackupRestore('vol-123')
# create script
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.StubOutWithMock(fv, 'update')
vol_name = utils.PhysName(stack_name, 'DataVolume')
fv.update(
display_description=vol_name,
display_name=vol_name)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['SnapshotId'] = 'backup-123'
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = vol.Volume('DataVolume',
t['Resources']['DataVolume'],
stack)
create = scheduler.TaskRunner(rsrc.create)
self.assertRaises(exception.ResourceFailure, create)
self.m.VerifyAll()
def test_cinder_create(self):
fv = FakeVolume('creating', 'available')
stack_name = 'test_volume_stack'
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=u'1', availability_zone='nova',
display_description='CustomDescription',
display_name='CustomName',
imageRef='Image1',
snapshot_id='snap-123',
metadata={'key': 'value'},
source_volid='vol-012',
volume_type='lvm').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties'] = {
'size': '1',
'availability_zone': 'nova',
'name': 'CustomName',
'description': 'CustomDescription',
'volume_type': 'lvm',
'metadata': {'key': 'value'},
# Note that specifying all these arguments doesn't work in
# practice, as they are conflicting, but we just want to check they
# are sent to the backend.
'imageRef': 'Image1',
'snapshot_id': 'snap-123',
'source_volid': 'vol-012',
}
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = vol.CinderVolume('DataVolume',
t['Resources']['DataVolume'],
stack)
self.assertEqual(rsrc.validate(), None)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.assertEqual(fv.status, 'available')
self.m.VerifyAll()
def test_cinder_create_from_image(self):
fv = FakeVolumeWithStateTransition('downloading', 'available')
stack_name = 'test_volume_stack'
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=u'1', availability_zone='nova',
display_description='ImageVolumeDescription',
display_name='ImageVolume',
imageRef='Image1').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'imageRef': 'Image1',
}
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = vol.CinderVolume('DataVolume',
t['Resources']['DataVolume'],
stack)
self.assertEqual(rsrc.validate(), None)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.assertEqual(fv.status, 'available')
self.m.VerifyAll()
def test_cinder_default(self):
fv = FakeVolume('creating', 'available')
stack_name = 'test_volume_stack'
clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.create(
size=u'1', availability_zone='nova',
display_description=None,
display_name=vol_name).AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties'] = {
'size': '1',
'availability_zone': | |
species.generate_resonance_structures()
# Get the thermo data for the molecule
symmetry_number = None
thermo_data_list = []
word_list = []
ref_dict = {}
for data, library, entry in database.thermo.get_all_thermo_data(species):
# Make sure we calculate Cp0 and CpInf
find_cp0_and_cpinf(species, data)
# Round trip conversion via Wilhoit for proper fitting
nasa = process_thermo_data(species, data)
# Generate Chemkin style NASA polynomial
species.thermo = nasa
nasa_string = write_thermo_entry(species)
if library is None:
source = 'Group additivity'
href = ''
ref_dict = parseThermoComment(data.comment)
symmetry_number = species.get_symmetry_number()
entry = Entry(data=data)
if data.comment is not None:
word_list = data.comment.split()
elif library in list(database.thermo.depository.values()):
source = 'Depository'
href = reverse('database:thermo-entry', kwargs={'section': 'depository', 'subsection': library.label, 'index': entry.index})
elif library in list(database.thermo.libraries.values()):
source = library.name
href = reverse('database:thermo-entry', kwargs={'section': 'libraries', 'subsection': library.label, 'index': entry.index})
thermo_data_list.append((
entry,
data,
source,
href,
nasa_string,
))
# Get the structure of the item we are viewing
structure = getStructureInfo(molecule)
return render(request, 'thermoData.html', {'molecule': molecule, 'structure': structure, 'thermo_data_list': thermo_data_list, 'symmetry_number': symmetry_number, 'ref_dict': ref_dict, 'word_list': word_list, 'plotWidth': 500, 'plotHeight': 400 + 15 * len(thermo_data_list)})
def parseThermoComment(comment):
"""
Takes a thermo comment (or any string) as input. Returns a dictionary whose keys
correspond to groups or libraries exactly as they appear in the string, and whose values
correspond to href links that direct to the specific library or group's database page.
"""
ref_dict = {}
# Search for library strings.
# Example: Gas phase thermo for [C]=O from Thermo library: DFT_QCI_thermo + radical(CdCdJ2_triplet). Adsorption correction: + Thermo group additivity estimation: adsorptionPt111(C=*(=R))
library_split_string = comment.split("Thermo library: ")
gas_phase_substring = library_split_string[0].split() # Example: ['Gas', 'phase', 'thermo', 'for', '[C]=O', 'from']
if len(library_split_string) > 1: # if a match was found for "Thermo library: "
if library_split_string[0] != '':
try:
gas_phase_species = gas_phase_substring[-2] # Example: '[C]=O'
except IndexError:
gas_phase_species = ''
else:
gas_phase_species = ''
library_substring = library_split_string[1].split() #Example: ['DFT_QCI_thermo', '+', 'radical(CdCdJ2_triplet).', 'Adsorption', 'correction:', '+', 'Thermo', 'group', 'additivity', 'estimation:', 'adsorptionPt111(C=*(=R))']
library_source_full = library_substring[0] # Example: 'DFT_QCI_thermo'
if library_source_full.endswith('.'): # strip the trailing period so the name matches the library key in the database and its link can be built
library_source = library_source_full[::-1].replace('.','',1)[::-1]
else:
library_source = library_source_full
try:
lib_index = database.thermo.libraries[library_source].entries[gas_phase_species].index
ref_dict[library_source_full] = reverse('database:thermo-entry', kwargs={'section': 'libraries', 'subsection': library_source, 'index': lib_index})
except KeyError:
ref_dict[library_source_full] = reverse('database:thermo', kwargs={'section': 'libraries', 'subsection': library_source})
# Search for group additivity substrings
# Example: Gas phase thermo for [C]=O from Thermo library: DFT_QCI_thermo + radical(CdCdJ2_triplet). Adsorption correction: + Thermo group additivity estimation: adsorptionPt111(C=*(=R))
groups_substrings = [word for word in comment.split() if "missing" not in word and '(' in word and ')' in word] # Example: ['radical(CdCdJ2_triplet).', 'adsorptionPt111(C=*(=R))']
for word in groups_substrings:
group_source_full = word # Example: 'adsorptionPt111(C=*(=R))'
group_name = word.split('(',1)[0] # Example: 'adsorptionPt111'
word = word.split('(',1)[1] # Example: 'C=*(=R))'
word = word[::-1].replace(')','',1)[::-1] # Example: 'C=*(=R)'
if word.endswith('.'): # e.g. in the case of 'CdCdJ2_triplet.'
word = word[::-1].replace('.','',1)[::-1]
try:
group_index = database.thermo.groups[group_name].entries[word].index
ref_dict[group_source_full] = reverse('database:thermo-entry', kwargs={'section': 'groups', 'subsection': group_name, 'index': group_index})
except KeyError:
pass
return ref_dict
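# Illustrative sketch, not part of the original module: assuming the global
# `database` above is loaded, a comment such as
#   'Gas phase thermo for [C]=O from Thermo library: DFT_QCI_thermo + radical(CdCdJ2_triplet).'
# would typically produce a ref_dict whose keys are the tokens exactly as they
# appear in the comment ('DFT_QCI_thermo' and 'radical(CdCdJ2_triplet).'), each
# mapped to the reversed 'database:thermo-entry' (or fallback 'database:thermo') URL:
#   ref_dict = parseThermoComment(comment)
#   library_link = ref_dict['DFT_QCI_thermo']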
################################################################################
def getDatabaseTreeAsList(database, entries):
"""
Return a list of entries in a given database, sorted by the order they
appear in the tree (as determined via a depth-first search).
"""
tree = []
for entry in entries:
# Write current node
tree.append(entry)
# Recursively descend children (depth-first)
tree.extend(getDatabaseTreeAsList(database, entry.children))
return tree
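# Illustrative note, not in the original source: for top-level entries [A, B]
# where A has children [A1, A2] and A1 has child [A1a], the returned order is
# [A, A1, A1a, A2, B] -- each node is immediately followed by its entire
# subtree, i.e. a pre-order depth-first traversal.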
def getKineticsTreeHTML(database, section, subsection, entries):
"""
Return a string of HTML markup used for displaying information about
kinetics entries in a given `database` as a tree of unordered lists.
"""
html = ''
for entry in entries:
# Write current node
url = reverse('database:kinetics-entry', kwargs={'section': section, 'subsection': subsection, 'index': entry.index})
html += '<li class="kineticsEntry">\n'
html += '<div class="kineticsLabel">'
if len(entry.children) > 0:
html += '<img id="button_{0}" class="treeButton" src="{1}"/>'.format(entry.index, static('img/tree-collapse.png'))
else:
html += '<img class="treeButton" src="{0}"/>'.format(static('img/tree-blank.png'))
html += '<a href="{0}">{1}. {2}</a>\n'.format(url, entry.index, entry.label)
html += '<div class="kineticsData">\n'
if entry.data is not None:
for T in [300, 400, 500, 600, 800, 1000, 1500, 2000]:
html += '<span class="kineticsDatum">{0:.2f}</span> '.format(math.log10(entry.data.get_rate_coefficient(T, P=1e5)))
html += '</div>\n'
# Recursively descend children (depth-first)
if len(entry.children) > 0:
html += '<ul id="children_{0}" class="kineticsSubTree">\n'.format(entry.index)
html += getKineticsTreeHTML(database, section, subsection, entry.children)
html += '</ul>\n'
html += '</li>\n'
return html
def getUntrainedReactions(family):
"""
Return a depository containing unique reactions for which no
training data exists.
"""
# Load training depository
for depository in family.depositories:
if 'training' in depository.label:
training = depository
break
else:
raise Exception('Could not find training depository in {0} family.'.format(family.label))
# Load trained reactions
trained_reactions = []
for entry in training.entries.values():
for reaction in trained_reactions:
if reaction.is_isomorphic(entry.item):
break
else:
trained_reactions.append(entry.item)
# Load untrained reactions
untrained_reactions = []
for depository in family.depositories:
if 'training' not in depository.label:
for entry in depository.entries.values():
for reaction in trained_reactions:
if reaction.is_isomorphic(entry.item):
break
else:
for reaction in untrained_reactions:
if reaction.is_isomorphic(entry.item):
break
else:
untrained_reactions.append(entry.item)
# Sort reactions by reactant size
untrained_reactions.sort(key=lambda reaction: sum([1 for r in reaction.reactants for a in r.molecule[0].atoms if a.is_non_hydrogen()]))
# Build entries
untrained = KineticsDepository(name='{0}/untrained'.format(family.label),
label='{0}/untrained'.format(family.label))
count = 1
for reaction in untrained_reactions:
untrained.entries['{0}'.format(count)] = Entry(
item=reaction,
index=count,
label=getReactionUrl(reaction),
)
count += 1
return untrained
###############################################################################
def queryNIST(entry, squib, entries, user):
"""
Pulls NIST kinetics and reference information, given
a unique entry squib (e.g. `1999SMI/GOL57-101:3`).
"""
url = 'http://kinetics.nist.gov/kinetics/Detail?id={0}'.format(squib)
cookiejar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookiejar))
# Set units
post = {'energyUnits': 'J',
'evaluationTemperature': '300.0',
'moleculeUnits': 'Mole',
'pressureUnits': 'Pa',
'referenceTemperature': '1.0',
'temperatureUnits': 'K',
'volumeUnits': 'cm',
}
request = opener.open('http://kinetics.nist.gov/kinetics/'
'SetUnitsBean.jsp', data=urllib.parse.urlencode(post))
request.close()
# Grab kinetics for a NIST entry from the full bibliographic page.
full_url = ('http://kinetics.nist.gov/kinetics/'
'Detail?id={0}:0'.format(squib.split(':')[0]))
request = opener.open(full_url)
soup = BeautifulSoup(request.read())
request.close()
# Find table on page corresponding to kinetics entries
try:
form = soup.findAll(name='form',
attrs={'name': 'KineticsResults'})[0]
except:
return 'No results found for {0}.'.format(squib)
# Find row in table corresponding to squib
for tr in form.findAll(name='tr'):
tdlist = tr.findAll(name='td')
if len(tdlist) == 17 and tr.findAll(name='input', value=squib):
break
else:
return 'No results found for {0}.'.format(squib)
# Assert entry is not a reference reaction
try:
if 'Reference reaction' in tr.findNext(name='tr').text:
return 'Entry is a reference reaction.'
except:
pass
# Check reaction order
try:
order = int(tdlist[16].text)
if order != len(entry.item.reactants):
return 'Reaction order does not match number of reactants.'
except:
return 'Invalid reaction order.'
# Grab pre-exponential
A = tdlist[8].text
if ' ' in A:
return 'Invalid pre-exponential.'
if ';' in A:
A = A.split(';')[1]
if order == 1:
entry.data.A = Quantity(float(A), 's^-1')
elif order == 2:
entry.data.A = Quantity(float(A) / 1.0e6, 'm^3/(mol*s)')
else:
return 'Unexpected reaction order encountered.'
# Grab temperature exponent
n = tdlist[10].text
if n == ' ':
n = 0.0
entry.data.n = Quantity(float(n), '')
# Grab activation energy
Ea = tdlist[12].text
if ' ' in Ea:
Ea = 0.0
elif ';' in Ea:
Ea = Ea.split(';')[1]
entry.data.Ea = Quantity(float(Ea) / 1.0e3, 'kJ/mol')
# Grab reference and miscellaneous data from NIST entry page.
request = opener.open(url)
html = request.read().replace('<p>', '<BR><BR>').replace('<P>',
'<BR><BR>')
soup = BeautifulSoup(html)
request.close()
# Grab reference
try:
type = soup.findAll('b', text='Reference type:')[0].parent
type = type.nextSibling[13:].lower()
if type == 'technical report' or type == 'journal article':
type = 'journal'
if type == 'book chapter':
type = 'book'
except:
type = None
if type not in ['journal', 'book']:
entry.reference = None
else:
if type == 'journal':
entry.reference = Article(authors=[])
# Grab journal title
try:
journal = soup.findAll('b', text='Journal:')[0].parent
entry.reference.journal = journal.nextSibling[13:]
except:
pass
# Grab volume number
try:
volume = soup.findAll('b', text='Volume:')[0].parent
entry.reference.volume = volume.nextSibling[13:]
except:
pass
# Grab pages
try:
pages = soup.findAll('b', text='Page(s):')[0].parent
pages = pages.nextSibling[13:]
if not pages:
pages = re.match(r'\d+[^\d]+([^:]+)', squib).group(1)
except:
pass
entry.reference.pages = pages.replace(' - ', '-')
elif type == 'book':
entry.reference = Book(authors=[])
# Grab publisher
try:
pub = soup.findAll(text='Publisher address:')[0].parent
entry.reference.publisher = pub.nextSibling[13:]
except:
pass
# Grab authors
try:
authors = soup.findAll('b', text='Author(s):')[0].parent
authors = authors.nextSibling[13:]
for author in authors.split(';'):
entry.reference.authors.append(author.strip())
except:
pass
# Grab title
try:
title = soup.findAll('b', text='Title:')[0].parent.nextSibling
entry.reference.title = title[13:]
while True:
title = title.nextSibling
try:
if title.name == 'br':
break
except:
pass
try:
entry.reference.title += title.text
except AttributeError:
entry.reference.title += title
except:
pass
# Grab year
try:
year = soup.findAll('b', text='Year:')[0].parent
entry.reference.year = | |
if A is None else vstack([eyex, A], "csr")
ll = r_[xmin, l]
uu = r_[xmax, u]
# split up linear constraints
ieq = flatnonzero( absolute(uu - ll) <= EPS )
igt = flatnonzero( (uu >= 1e10) & (ll > -1e10) )
ilt = flatnonzero( (ll <= -1e10) & (uu < 1e10) )
ibx = flatnonzero( (absolute(uu - ll) > EPS) & (uu < 1e10) & (ll > -1e10) )
# zero-sized sparse matrices unsupported
Ae = AA[ieq, :] if len(ieq) else None
if len(ilt) or len(igt) or len(ibx):
idxs = [(1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
Ai = vstack([sig * AA[idx, :] for sig, idx in idxs if len(idx)])
else:
Ai = None
be = uu[ieq, :]
bi = r_[uu[ilt], -ll[igt], uu[ibx], -ll[ibx]]
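# Explanatory note, not in the original source: the stacked system
# ll <= AA*x <= uu is classified row by row -- `ieq` rows (ll == uu) become
# equalities Ae*x = be, `ilt`/`igt` rows with a single finite bound become one
# inequality each, and `ibx` rows with two finite bounds contribute one row per
# side. The sign factors in `idxs` flip the lower-bounded rows so that every
# inequality is expressed uniformly as Ai*x <= bi.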
# evaluate cost f(x0) and constraints g(x0), h(x0)
x = x0
f, df, _ = f_fcn(x) # cost
f = f * opt["cost_mult"]
df = df * opt["cost_mult"]
if nonlinear:
hn, gn, dhn, dgn = gh_fcn(x) # non-linear constraints
h = hn if Ai is None else r_[hn, Ai * x - bi] # inequality constraints
g = gn if Ae is None else r_[gn, Ae * x - be] # equality constraints
if (dhn is None) and (Ai is None):
dh = None
elif dhn is None:
dh = Ai.T
elif Ae is None:
dh = dhn
else:
dh = hstack([dhn, Ai.T])
if (dgn is None) and (Ae is None):
dg = None
elif dgn is None:
dg = Ae.T
elif Ae is None:
dg = dgn
else:
dg = hstack([dgn, Ae.T])
else:
h = -bi if Ai is None else Ai * x - bi # inequality constraints
g = -be if Ae is None else Ae * x - be # equality constraints
dh = None if Ai is None else Ai.T # 1st derivative of inequalities
dg = None if Ae is None else Ae.T # 1st derivative of equalities
# some dimensions
neq = g.shape[0] # number of equality constraints
niq = h.shape[0] # number of inequality constraints
neqnln = gn.shape[0] # number of non-linear equality constraints
niqnln = hn.shape[0] # number of non-linear inequality constraints
nlt = len(ilt) # number of upper bounded linear inequalities
ngt = len(igt) # number of lower bounded linear inequalities
nbx = len(ibx) # number of doubly bounded linear inequalities
# initialize gamma, lam, mu, z, e
gamma = 1 # barrier coefficient
lam = zeros(neq)
z = z0 * ones(niq)
mu = z0 * ones(niq)
k = flatnonzero(h < -z0)
z[k] = -h[k]
k = flatnonzero((gamma / z) > z0)
mu[k] = gamma / z[k]
e = ones(niq)
# check tolerance
f0 = f
# if opt["step_control"]:
# L = f + lam.T * g + mu.T * (h + z) - gamma * sum(log(z))
Lx = df
Lx = Lx + dg * lam if dg is not None else Lx
Lx = Lx + dh * mu if dh is not None else Lx
gnorm = norm(g, Inf) if len(g) else 0.0
lam_norm = norm(lam, Inf) if len(lam) else 0.0
mu_norm = norm(mu, Inf) if len(mu) else 0.0
feascond = \
max([gnorm, max(h)]) / (1 + max([norm(x, Inf), norm(z, Inf)]))
gradcond = \
norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
compcond = dot(z, mu) / (1 + norm(x, Inf))
costcond = absolute(f - f0) / (1 + absolute(f0))
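# Explanatory note, not in the original source: the four normalised measures
# correspond to the KKT conditions of the barrier problem --
#   feascond : primal feasibility, max(||g||_inf, max(h)), scaled by x and z
#   gradcond : dual feasibility, the Lagrangian gradient Lx = df + dg*lam + dh*mu
#   compcond : complementarity, z'*mu
#   costcond : relative change in the objective between iterations
# The solver declares convergence only when all four drop below their
# respective tolerances (feastol, gradtol, comptol, costtol).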
# save history
hist[i] = {'feascond': feascond, 'gradcond': gradcond,
'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
'stepsize': 0, 'obj': f / opt["cost_mult"], 'alphap': 0, 'alphad': 0}
if opt["verbose"]:
# s = '-sc' if opt["step_control"] else ''
# version, date = '1.0b2', '24-Mar-2010'
# print 'Python Interior Point Solver - PIPS%s, Version %s, %s' % \
# (s, version, date)
print " it objective step size feascond gradcond " \
"compcond costcond "
print "---- ------------ --------- ------------ ------------ " \
"------------ ------------"
print "%3d %12.8g %10s %12g %12g %12g %12g" % \
(i, (f / opt["cost_mult"]), "",
feascond, gradcond, compcond, costcond)
if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
compcond < opt["comptol"] and costcond < opt["costtol"]:
converged = True
if opt["verbose"]:
print "Converged!"
# do Newton iterations
while (not converged and i < opt["max_it"]):
# update iteration counter
i += 1
# compute update step
lmbda = {"eqnonlin": lam[range(neqnln)],
"ineqnonlin": mu[range(niqnln)]}
if nonlinear:
if hess_fcn is None:
print "pips: Hessian evaluation via finite differences " \
"not yet implemented.\nPlease provide " \
"your own hessian evaluation function."
Lxx = hess_fcn(x, lmbda)
else:
_, _, d2f = f_fcn(x) # cost
Lxx = d2f * opt["cost_mult"]
rz = range(len(z))
zinvdiag = csr_matrix((1.0 / z, (rz, rz))) if len(z) else None
rmu = range(len(mu))
mudiag = csr_matrix((mu, (rmu, rmu))) if len(mu) else None
dh_zinv = None if dh is None else dh * zinvdiag
M = Lxx if dh is None else Lxx + dh_zinv * mudiag * dh.T
N = Lx if dh is None else Lx + dh_zinv * (mudiag * h + gamma * e)
Ab = M if dg is None else vstack([
hstack([M, dg]),
hstack([dg.T, csr_matrix((neq, neq))])
])
bb = r_[-N, -g]
dxdlam = spsolve(Ab.tocsr(), bb)
dx = dxdlam[:nx]
dlam = dxdlam[nx:nx + neq]
dz = -h - z if dh is None else -h - z - dh.T * dx
dmu = -mu if dh is None else -mu + zinvdiag * (gamma * e - mudiag * dz)
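# Explanatory note, not in the original source: the full Newton system in
# (dx, dlam, dz, dmu) has been condensed by eliminating dz and dmu, leaving the
# symmetric saddle-point system
#   [ M    dg ] [ dx   ]   [ -N ]
#   [ dg'  0  ] [ dlam ] = [ -g ]
# with M = Lxx + dh*Z^-1*MU*dh' and N = Lx + dh*Z^-1*(MU*h + gamma*e); dz and
# dmu are then recovered from dx via the two expressions above.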
# optional step-size control
# sc = False
if opt["step_control"]:
raise NotImplementedError
# x1 = x + dx
#
# # evaluate cost, constraints, derivatives at x1
# f1, df1 = ipm_f(x1) # cost
# f1 = f1 * opt["cost_mult"]
# df1 = df1 * opt["cost_mult"]
# gn1, hn1, dgn1, dhn1 = ipm_gh(x1) # non-linear constraints
# g1 = gn1 if Ai is None else r_[gn1, Ai * x1 - bi] # ieq constraints
# h1 = hn1 if Ae is None else r_[hn1, Ae * x1 - be] # eq constraints
# dg1 = dgn1 if Ai is None else r_[dgn1, Ai.T] # 1st der of ieq
# dh1 = dhn1 if Ae is None else r_[dhn1, Ae.T] # 1st der of eqs
#
# # check tolerance
# Lx1 = df1 + dh1 * lam + dg1 * mu
# feascond1 = max([ norm(h1, Inf), max(g1) ]) / \
# (1 + max([ norm(x1, Inf), norm(z, Inf) ]))
# gradcond1 = norm(Lx1, Inf) / \
# (1 + max([ norm(lam, Inf), norm(mu, Inf) ]))
#
# if feascond1 > feascond and gradcond1 > gradcond:
# sc = True
# if sc:
# alpha = 1.0
# for j in range(opt["max_red"]):
# dx1 = alpha * dx
# x1 = x + dx1
# f1 = ipm_f(x1) # cost
# f1 = f1 * opt["cost_mult"]
# gn1, hn1 = ipm_gh(x1) # non-linear constraints
# g1 = r_[gn1, Ai * x1 - bi] # inequality constraints
# h1 = r_[hn1, Ae * x1 - be] # equality constraints
# L1 = f1 + lam.H * h1 + mu.H * (g1 + z) - gamma * sum(log(z))
# if opt["verbose"]:
# logger.info("\n %3d %10.f" % (-j, norm(dx1)))
# rho = (L1 - L) / (Lx.H * dx1 + 0.5 * dx1.H * Lxx * dx1)
# if rho > rho_min and rho < rho_max:
# break
# else:
# alpha = alpha / 2.0
# dx = alpha * dx
# dz = alpha * dz
# dlam = alpha * dlam
# dmu = alpha * dmu
# do the update
k = flatnonzero(dz < 0.0)
alphap = min([xi * min(z[k] / -dz[k]), 1]) if len(k) else 1.0
k = flatnonzero(dmu < 0.0)
alphad = min([xi * min(mu[k] / -dmu[k]), 1]) if len(k) else 1.0
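# Explanatory note, not in the original source: alphap and alphad implement the
# usual fraction-to-boundary rule -- the primal and dual step lengths are the
# largest fractions (capped at 1 and shrunk by the factor xi < 1) that keep the
# slacks z and the inequality multipliers mu strictly positive after the update.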
x = x + alphap * dx
z = z + alphap * dz
lam = lam + alphad * dlam
mu = mu + alphad * dmu
if niq > 0:
gamma = sigma * dot(z, mu) / niq
# evaluate cost, constraints, derivatives
f, df, _ = f_fcn(x) # cost
f = f | |
<filename>win/devkit/other/pymel/extras/completion/py/PySide/QtNetwork.py
from PySide.QtCore import QObject as _QObject
from PySide.QtCore import QIODevice as _QIODevice
class _Object(object):
__dict__ = None
class QNetworkCookieJar(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def allCookies(*args, **kwargs):
pass
def cookiesForUrl(*args, **kwargs):
pass
def setAllCookies(*args, **kwargs):
pass
def setCookiesFromUrl(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QNetworkProxy(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def capabilities(*args, **kwargs):
pass
def hostName(*args, **kwargs):
pass
def isCachingProxy(*args, **kwargs):
pass
def isTransparentProxy(*args, **kwargs):
pass
def password(*args, **kwargs):
pass
def port(*args, **kwargs):
pass
def setCapabilities(*args, **kwargs):
pass
def setHostName(*args, **kwargs):
pass
def setPassword(*args, **kwargs):
pass
def setPort(*args, **kwargs):
pass
def setType(*args, **kwargs):
pass
def setUser(*args, **kwargs):
pass
def type(*args, **kwargs):
pass
def user(*args, **kwargs):
pass
def applicationProxy(*args, **kwargs):
pass
def setApplicationProxy(*args, **kwargs):
pass
CachingCapability = None
Capabilities = None
Capability = None
DefaultProxy = None
FtpCachingProxy = None
HostNameLookupCapability = None
HttpCachingProxy = None
HttpProxy = None
ListeningCapability = None
NoProxy = None
ProxyType = None
Socks5Proxy = None
TunnelingCapability = None
UdpTunnelingCapability = None
__new__ = None
class QSslCipher(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __nonzero__(*args, **kwargs):
"""
x.__nonzero__() <==> x != 0
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def authenticationMethod(*args, **kwargs):
pass
def encryptionMethod(*args, **kwargs):
pass
def isNull(*args, **kwargs):
pass
def keyExchangeMethod(*args, **kwargs):
pass
def name(*args, **kwargs):
pass
def protocol(*args, **kwargs):
pass
def protocolString(*args, **kwargs):
pass
def supportedBits(*args, **kwargs):
pass
def usedBits(*args, **kwargs):
pass
__new__ = None
class QHostInfo(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addresses(*args, **kwargs):
pass
def error(*args, **kwargs):
pass
def errorString(*args, **kwargs):
pass
def hostName(*args, **kwargs):
pass
def lookupId(*args, **kwargs):
pass
def setAddresses(*args, **kwargs):
pass
def setError(*args, **kwargs):
pass
def setErrorString(*args, **kwargs):
pass
def setHostName(*args, **kwargs):
pass
def setLookupId(*args, **kwargs):
pass
def abortHostLookup(*args, **kwargs):
pass
def fromName(*args, **kwargs):
pass
def localDomainName(*args, **kwargs):
pass
def localHostName(*args, **kwargs):
pass
HostInfoError = None
HostNotFound = None
NoError = None
UnknownError = None
__new__ = None
class QNetworkAddressEntry(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def broadcast(*args, **kwargs):
pass
def ip(*args, **kwargs):
pass
def netmask(*args, **kwargs):
pass
def prefixLength(*args, **kwargs):
pass
def setBroadcast(*args, **kwargs):
pass
def setIp(*args, **kwargs):
pass
def setNetmask(*args, **kwargs):
pass
def setPrefixLength(*args, **kwargs):
pass
__new__ = None
class QNetworkRequest(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def attribute(*args, **kwargs):
pass
def hasRawHeader(*args, **kwargs):
pass
def header(*args, **kwargs):
pass
def originatingObject(*args, **kwargs):
pass
def priority(*args, **kwargs):
pass
def rawHeader(*args, **kwargs):
pass
def rawHeaderList(*args, **kwargs):
pass
def setAttribute(*args, **kwargs):
pass
def setHeader(*args, **kwargs):
pass
def setOriginatingObject(*args, **kwargs):
pass
def setPriority(*args, **kwargs):
pass
def setRawHeader(*args, **kwargs):
pass
def setSslConfiguration(*args, **kwargs):
pass
def setUrl(*args, **kwargs):
pass
def sslConfiguration(*args, **kwargs):
pass
def url(*args, **kwargs):
pass
AlwaysCache = None
AlwaysNetwork = None
Attribute = None
AuthenticationReuseAttribute = None
Automatic = None
CacheLoadControl = None
CacheLoadControlAttribute = None
CacheSaveControlAttribute = None
ConnectionEncryptedAttribute = None
ContentDispositionHeader = None
ContentLengthHeader = None
ContentTypeHeader = None
CookieHeader = None
CookieLoadControlAttribute = None
CookieSaveControlAttribute = None
CustomVerbAttribute = None
DoNotBufferUploadDataAttribute = None
DownloadBufferAttribute = None
HighPriority = None
HttpPipeliningAllowedAttribute = None
HttpPipeliningWasUsedAttribute = None
HttpReasonPhraseAttribute = None
HttpStatusCodeAttribute = None
KnownHeaders = None
LastModifiedHeader = None
LoadControl = None
LocationHeader = None
LowPriority = None
Manual = None
MaximumDownloadBufferSizeAttribute = None
NormalPriority = None
PreferCache = None
PreferNetwork = None
Priority = None
RedirectionTargetAttribute = None
SetCookieHeader = None
SourceIsFromCacheAttribute = None
SynchronousRequestAttribute = None
User = None
UserMax = None
__new__ = None
class QAbstractNetworkCache(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def cacheSize(*args, **kwargs):
pass
def clear(*args, **kwargs):
pass
def data(*args, **kwargs):
pass
def insert(*args, **kwargs):
pass
def metaData(*args, **kwargs):
pass
def prepare(*args, **kwargs):
pass
def remove(*args, **kwargs):
pass
def updateMetaData(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QNetworkInterface(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
| |
s.write('}')
s.newline()
def get_all_dependent_pkgs(search_path, context, package, indir):
msgs = msg_list_full_path(package, search_path, '.msg')
srvs = msg_list_full_path(package, search_path, '.srv')
all_pkgs = set()
for msg in msgs:
msg_name = os.path.basename(msg)
full_type = genmsg.gentools.compute_full_type_name(package, msg_name)
spec = genmsg.msg_loader.load_msg_from_file(context, msg, full_type)
pkgs, _, _ = find_requires(spec)
for pkg in pkgs:
all_pkgs.add(pkg)
for msg in srvs:
msg_name = os.path.basename(msg)
full_type = genmsg.gentools.compute_full_type_name(package, msg_name)
spec = genmsg.msg_loader.load_srv_from_file(context, msg, full_type)
pkgs, _, _ = find_requires(spec)
for pkg in pkgs:
all_pkgs.add(pkg)
# print('Package {}'.format(package))
# print('Package dependencies {}'.format(all_pkgs))
if package in all_pkgs:
all_pkgs.remove(package)
return all_pkgs
def write_pubspec(s, package, search_path, context, indir):
s.write('# Auto-generated. Do not edit!\n\n', newline=False)
s.write('# Updated: {}\n\n'.format(time.ctime()), newline=False)
s.write('name: {}'.format(package))
s.write('description: A ros {} message package for dartros'.format(package))
s.write('version: {}'.format(GenVersion))
if package == 'std_msgs' or package == 'rosgraph_msgs' or package == 'actionlib_msgs' or package == 'sensor_msgs' or package == 'geometry_msgs':
s.write('repository: https://github.com/TimWhiting/{}_dart'.format(package))
else:
s.write('publish_to: "none"')
msgs = msg_list(package, search_path, '.msg')
for m in msgs:
genmsg.load_msg_by_type(context, '%s/%s' %
(package, m), search_path)
srvs = msg_list(package, search_path, '.srv')
deps = get_all_dependent_pkgs(search_path, context, package, indir)
# msgExists = os.path.exists(pjoin(package_dir, 'lib/msgs.dart'))
# srvExists = os.path.exists(pjoin(package_dir, 'lib/srvs.dart'))
s.newline()
s.write('environment:')
with Indent(s):
s.write('sdk: ">=2.7.0 < 3.0.0"')
s.newline()
s.write('dependencies:')
with Indent(s):
s.write('buffer: ^1.0.6')
s.write('dartros: ^0.0.4+3')
for dep in deps:
if dep == 'std_msgs':
s.write('std_msgs: ^{}'.format(GenVersion))
elif dep == 'actionlib_msgs':
s.write('actionlib_msgs: ^{}'.format(GenVersion))
elif dep == 'rosgraph_msgs':
s.write('rosgraph_msgs: ^{}'.format(GenVersion))
elif dep == 'sensor_msgs':
s.write('sensor_msgs: ^{}'.format(GenVersion))
elif dep == 'geometry_msgs':
s.write('geometry_msgs: ^{}'.format(GenVersion))
else:
s.write('{}:'.format(dep))
with Indent(s):
s.write('path: ../{}'.format(dep))
s.newline()
def needs_update(infile, outfile):
if not os.path.exists(outfile):
return True
last_modified_input = os.path.getmtime(infile)
script_updated = os.path.getmtime(__file__.rstrip('c'))
outexists = os.path.exists(outfile)
if outexists and not DebugGen:
last_modified_output = os.path.getmtime(outfile)
return last_modified_input > last_modified_output or script_updated > last_modified_output
return True
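# Illustrative note, not in the original source: regeneration is triggered when
# the output file is missing, when DebugGen is set, or when either the input
# .msg/.srv file or this generator script (__file__ with a trailing 'c'
# stripped, to handle .pyc paths) is newer than the existing output.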
def generate_all_msgs_for_package(package, output_dir, search_path):
path_package = find_path_from_cmake_path(
pjoin('share', package, 'msg'))
if (path_package is None):
# print('New package: {}'.format(package))
return
msgs = glob.glob(path_package + '/*.msg')
# print(msgs)
other_package = '{}/pubspec.yaml'.format(pjoin(output_dir, package))
# print('Other package {}'.format(other_package))
if not needs_update(other_package, other_package): # If generation script hasn't changed
pass
# print(t)
# print(time.time())
# print('Skipping package generation for {}'.format(package))
elif package not in generated_packages:
generated_packages.add(package)
generate_msg(package, msgs, pjoin(output_dir, package), search_path)
else:
pass
def generate_msg(pkg, files, out_dir, search_path):
"""
Generate dart code for all messages in a package
"""
# print('Generated packages {}'.format(generated_packages))
msg_context = MsgContext.create_default()
for f in files:
f = os.path.abspath(f)
infile = os.path.basename(f)
full_type = genmsg.gentools.compute_full_type_name(pkg, infile)
spec = genmsg.msg_loader.load_msg_from_file(msg_context, f, full_type)
if spec.short_name == 'String':
spec.short_name = 'StringMessage'
generate_msg_from_spec(msg_context, spec, search_path, out_dir, pkg, f)
indir = os.path.dirname(files[0])
########################################
# 3. Write the package pubspec.yaml.dart file
########################################
io = StringIO()
s = IndentedWriter(io)
write_pubspec(s, pkg, search_path, msg_context, indir)
package_update = True
pubspec = '{}/pubspec.yaml'.format(out_dir)
mode = 'w+'
if os.path.isfile(pubspec):
mode = 'r+'
with open(pubspec, mode) as f:
if f.read() == io.getvalue() and time.time() - os.path.getmtime(pubspec) < 5:
# print('Pubspec identical')
package_update = False
if package_update:
with open(pubspec, 'w+') as f:
f.write(io.getvalue())
import subprocess
try:
# print('running pub upgrade in {}'.format(out_dir))
subprocess.check_output('which pub', shell=True)
p = subprocess.Popen(['pub', 'upgrade'], cwd=out_dir, stdout=subprocess.PIPE)
p.wait()
except subprocess.CalledProcessError as e:
pass
io.close()
(directory, pack) = psplit(out_dir)
if len(search_path.keys()) == 0:
return
for package in search_path.keys():
if package != pkg and package is not None:
# new_search = deepcopy(search_path)
# new_search.pop(package)
generate_all_msgs_for_package(package, directory, search_path)
def generate_srv(pkg, files, out_dir, search_path):
"""
Generate dart code for all services in a package
"""
msg_context = MsgContext.create_default()
for f in files:
f = os.path.abspath(f)
infile = os.path.basename(f)
full_type = genmsg.gentools.compute_full_type_name(pkg, infile)
spec = genmsg.msg_loader.load_srv_from_file(msg_context, f, full_type)
if '.action' in f:
print('Action class')
return
generate_srv_from_spec(msg_context, spec, search_path, out_dir, pkg, f)
indir = os.path.dirname(files[0])
########################################
# 3. Write the package pubspec.yaml file
########################################
io = StringIO()
s = IndentedWriter(io)
write_pubspec(s, pkg, search_path, msg_context, indir)
package_update = True
pubspec = '{}/pubspec.yaml'.format(out_dir)
mode = 'w+'
if os.path.isfile(pubspec):
mode = 'r+'
with open(pubspec, mode) as f:
if f.read() == io.getvalue() and time.time() - os.path.getmtime(pubspec) < 5:
# print('Pubspec identical')
package_update = False
if package_update:
with open(pubspec, 'w+') as f:
f.write(io.getvalue())
import subprocess
try:
# print('running pub upgrade in {}'.format(out_dir))
subprocess.check_output('which pub', shell=True)
p = subprocess.Popen(['pub', 'upgrade'], cwd=out_dir, stdout=subprocess.PIPE)
p.wait()
except subprocess.CalledProcessError as e:
pass
io.close()
def msg_list(pkg, search_path, ext):
dir_list = search_path[pkg]
files = []
for d in dir_list:
files.extend([f for f in os.listdir(d) if f.endswith(ext)])
return [f[:-len(ext)] for f in files]
def msg_list_full_path(pkg, search_path, ext):
dir_list = search_path[pkg]
files = []
for d in dir_list:
files.extend([pjoin(d,f) for f in os.listdir(d) if f.endswith(ext)])
return files
def generate_action_from_spec(msg_context, spec, search_path, output_dir, package, action_type='action'):
io = StringIO()
s = IndentedWriter(io)
write_begin(s, spec)
write_extra_action_requires(s, spec)
external_deps, _ = write_requires(s, spec, search_path, output_dir)
write_class(s, spec, action=action_type)
write_serialize(s, spec)
write_deserialize(s, spec)
write_get_message_size(s, spec, search_path)
write_ros_datatype(s, spec)
write_md5sum(s, msg_context, spec)
write_message_definition(s, msg_context, spec)
if action_type == 'action':
write_action_extras(s, msg_context, spec)
write_end(s, spec)
src_dir = output_dir + '/lib/src/msgs'
if (not os.path.exists(src_dir)):
# if we're being run concurrently, the above test can report false but os.makedirs can still fail if
# another copy just created the directory
try:
os.makedirs(src_dir)
except OSError as e:
pass
with open('%s/lib/src/msgs/%s.dart' % (output_dir, spec.short_name), 'w') as f:
f.write(io.getvalue() + "\n")
io.close()
def generate_msg_from_spec(msg_context, spec, search_path, output_dir, package, infile, msgs=None):
"""
Generate a message
@param msg_path: The path to the .msg file
@type msg_path: str
"""
output_file = '%s/lib/src/msgs/%s.dart' % (output_dir, spec.short_name)
if not needs_update(infile, output_file):
return
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
spec.actual_name = spec.short_name
for field in spec.parsed_fields():
if field.name == spec.actual_name:
field.name = spec.actual_name + 'Value'
spec.component_type = 'message'
msgs = msg_list(package, search_path, '.msg')
for m in msgs:
genmsg.load_msg_by_type(msg_context, '%s/%s' %
(package, m), search_path)
msg = spec.short_name
if msg + 'Goal' in msgs and msg + 'Feedback' in msgs and msg + 'Result' in msgs:
return generate_action_from_spec(msg_context, spec, search_path, output_dir, package)
elif len(msg.split('ActionGoal')) > 1:
return generate_action_from_spec(msg_context, spec, search_path, output_dir, package, action_type='goal')
elif len(msg.split('ActionFeedback')) > 1:
return generate_action_from_spec(msg_context, spec, search_path, output_dir, package, action_type='feedback')
elif len(msg.split('ActionResult')) > 1:
return generate_action_from_spec(msg_context, spec, search_path, output_dir, package, action_type='result')
########################################
# 1. Write the .dart file
########################################
io = StringIO()
s = IndentedWriter(io)
write_begin(s, spec)
external_deps, _ = write_requires(s, spec, search_path, output_dir)
write_class(s, spec)
write_serialize(s, spec)
write_deserialize(s, spec)
write_get_message_size(s, spec, search_path)
write_ros_datatype(s, spec)
write_md5sum(s, msg_context, spec)
write_message_definition(s, msg_context, spec)
write_end(s, spec)
src_dir = output_dir + '/lib/src/msgs'
if (not os.path.exists(src_dir)):
# if we're being run concurrently, the above test can report false but os.makedirs can still fail if
# another copy just created the directory
try:
os.makedirs(src_dir)
except OSError as e:
pass
with open(output_file, 'w') as f:
f.write(io.getvalue() + "\n")
io.close()
########################################
# 3. Write the msgs.dart file
# This is being rewritten once per msg
# file, which is inefficient
########################################
io = StringIO()
s = IndentedWriter(io)
# print(srvs)
write_msg_export(s, msgs, package, search_path)
with open('{}/lib/msgs.dart'.format(output_dir), 'w') as f:
f.write(io.getvalue())
io.close()
# TODO most of this could probably be refactored into being shared with messages
def generate_srv_from_spec(msg_context, spec, search_path, output_dir, package, path):
"Generate code from .srv file"
output_file = '%s/lib/src/srvs/%s.dart' % (output_dir, spec.short_name)
if not needs_update(path, output_file):
return
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
ext = '.srv'
srv_path = os.path.dirname(path)
srvs = msg_list(package, {package: [srv_path]}, ext)
for srv in srvs:
load_srv_from_file(msg_context, '%s/%s%s' %
(srv_path, srv, ext), '%s/%s' % (package, srv))
src_dir = output_dir + '/lib/src/srvs'
if (not os.path.exists(src_dir)):
# if we're being run concurrently, the above test can report false but os.makedirs can still fail if
# another copy just created the directory
try:
os.makedirs(src_dir)
except OSError as e:
pass
########################################
# 1. Write the .dart file
########################################
io = StringIO()
s = IndentedWriter(io)
write_begin(s, spec, True)
found_packages, local_deps = write_requires(
s, spec.request, search_path, output_dir, None, None, True)
write_requires(s, spec.response, search_path, output_dir,
found_packages, local_deps, True)
spec.request.actual_name = '%sRequest' % spec.short_name
spec.response.actual_name = '%sResponse' % spec.short_name
write_srv_component(s, spec.request, msg_context, spec, search_path)
write_srv_component(s, spec.response, msg_context, spec, search_path)
write_srv_end(s, msg_context, spec)
with open('%s/lib/src/srvs/%s.dart' % (output_dir, spec.short_name), 'w') as f:
f.write(io.getvalue())
io.close()
########################################
# 3. Write the srvs.dart file
# This is being rewritten once per msg
# file, which is inefficient
########################################
io = StringIO()
s = IndentedWriter(io)
# print(srvs)
write_srv_export(s, srvs, package)
with open('{}/lib/srvs.dart'.format(output_dir), 'w') | |
"""
"""
# Libraries
import numpy as np
import pandas as pd
# Pint libraries
from pint.errors import UndefinedUnitError
# DataBlend libraries
from datablend.core.settings import textwrapper
from datablend.core.settings import ureg
from datablend.utils.pandas import nanunique
from datablend.utils.pandas_schema import schema_from_json
# ---------------------------------------------------
# Constants
# ---------------------------------------------------
TRANSFORMATIONS_STACK = [
'range_correction',
'order_magnitude_correction',
'replace_correction',
'static_correction',
'fillna_correction',
'unique_true_value_correction'
]
TRANSFORMATIONS_TIDY = [
'range_correction',
'order_magnitude_correction',
'replace_correction',
'static_correction',
'fillna_correction',
#'compound_feature_correction',
'unique_true_value_correction'
]
TRANSFORMATION_GROUPBY = [
'static_correction',
'fillna_correction',
'unique_true_value_correction'
]
# ---------------------------------------------------
# Helper methods
# ---------------------------------------------------
# Transformation functions
def mode(series):
""""""
print(type(series))
if series.isnull().all():
return np.nan
return series.mode()[0]
def fbfill(x):
"""Computes forward and then backward fill."""
return x.ffill().bfill()
def bffill(x):
"""Computes backward and then forward fill"""
return x.bfill(x).ffill(x)
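# Illustrative example, not in the original source: for
# pd.Series([np.nan, 1, np.nan, 2, np.nan]), fbfill yields [1, 1, 1, 2, 2]
# (forward fill, then backward fill for the leading NaN), while bffill yields
# [1, 1, 2, 2, 2].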
TRANSFORMATIONS = {
'mode': mode,
'fbfill': fbfill,
'bffill': bffill
}
def str2func(d):
"""This method passes strings to functions
Parameters
---------
d: dict
Dictionary where value is a function name."""
# Create deep dictionary copy
if isinstance(d, str):
if d in TRANSFORMATIONS:
return TRANSFORMATIONS[d]
# Return
return d
def swap_day_month(x):
"""This method...
.. note: Check that day/month can be swapped by
ensuring they are in the range (1, 12)
.. note: Should I return nan?
"""
if (x.day > 12) or (x.month > 12):
return np.nan
return x.replace(month=x.day, day=x.month)
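# Illustrative example, not in the original source:
#   swap_day_month(pd.Timestamp('2020-03-05'))  ->  Timestamp('2020-05-03')
#   swap_day_month(pd.Timestamp('2020-03-25'))  ->  nan  (25 is not a valid month)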
def add_to_date(x, year=0, month=0, day=0):
"""This method...
.. note: Should I return nan?
"""
try:
return x.replace(year=x.year+year,
month=x.month+month,
day=x.day+day)
except:
return x
# --------------------------------------------------------------------
# Corrections
# --------------------------------------------------------------------
def fillna_correction(series, **kwargs):
"""Corrects filling nan with a strategy
.. note: Generalise to get function and pass arguments!
Examples
--------
# Fill nan
tidy.abdominal_pain = \
tidy.groupby(by=['StudyNo']) \
.abdominal_pain.fillna(False)
"""
if 'method' in kwargs:
if kwargs['method'] == 'bffill':
return series.transform(bffill)
if kwargs['method'] == 'fbfill':
return series.transform(fbfill)
return series.fillna(**kwargs)
def static_correction(series, method, **kwargs):
"""Corrects filling with a consistent value.
.. note: Mode might return a series with two values with the
same frequency and only the first will be considered.
Example
-------
tidy.shock = \
tidy.groupby(by='StudyNo').shock \
.transform(static_correction, method='max')
Parameters
----------
method: string
The method which can be a function or a string supported
by the pandas apply function such as [max, min, median,
mean, mode]
"""
# The series is static already
if series.nunique(dropna=False) == 1:
return series
# Get value to fill with.
value = series.apply(method)
# For mode a series is returned
if isinstance(value, pd.Series):
value = value[0]
# Transform
transform = series.copy(deep=True)
transform.update(np.repeat(value, len(series)))
# Return
return transform
def replace_correction(series, **kwargs):
"""Corrects replacing values"""
return series.replace(**kwargs)
def order_magnitude_correction(series, range, orders=[10, 100]):
"""Corrects issues with order of magnitudes.
Data manually collected often has one/two degrees of magnitude
higher because one or two digits are pressed accidentally. It
also happens if the comma was no pressed properly.
Examples
--------
tidy.body_temperature = tidy.body_temperature \
.transform(order_magnitude_correction, range=(20, 50))
Parameters
----------
series: pd.Series
The series to correct.
orders: list
The orders of magnitude to try.
range:
The desired range to accept the correction.
Returns
-------
pd.Series
"""
# Create transform
transform = pd.to_numeric(series.copy(deep=True))
# Range
low, high = range
# Loop
for i in orders:
aux = (transform / i)
idx = aux.between(low, high)
transform[idx] = aux[idx]
# Return
return transform
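# Illustrative example, not in the original source: with range=(20, 50) and the
# default orders [10, 100], a body temperature recorded as 370.5 is divided by
# 10 to give 37.05, a value of 3675.0 is divided by 100 to give 36.75, and
# values already inside (20, 50) are left untouched.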
def range_correction(series, range=None, value=np.nan):
"""Corrects issues with ranges.
Some values collected are not within the ranges. They could
also be removed using the IQR rule, but if we know the limits
we can filter them as errors instead of outliers.
.. todo: Warn if replace value is outside range.
.. todo: Include several options for value:
value=np.nan
value=number
value=(low, high)
value='edges'
.. todo: If transformation to numeric fails show error!
Example
-------
tidy.dbp = \
tidy.dbp.transform(range_correction, range=(40, 100))
Parameters
----------
series: pd.Series
The series to correct.
range: tuple
The (low, high) limits of accepted values.
value:
The value used to replace out-of-range entries.
Returns
-------
pd.Series
"""
# Create transform
transform = pd.to_numeric(series.copy(deep=True))
# Range (required)
if range is None:
    raise ValueError("range_correction requires a (low, high) range")
low, high = range
# Correction
transform[~transform.between(low, high)] = value
# Return
return transform
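
# Illustrative note (added): with range=(40, 100) and the default
# value=np.nan, a made-up diastolic blood pressure series [55, 250, 80, 0]
# becomes [55, NaN, 80, NaN]; only the out-of-range entries are replaced.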
def category_correction(series, **kwargs):
"""Corrects weird categories!
.. note: Can be done using the replace_correction?
"""
pass
def causal_correction(x, y):
#if x is one then y must be one.
pass
def compound_feature_correction(series, compound):
"""Corrects compound boolean features.
Some values are collected either in subcategories or in a
final compound category (e.g. bleeding, bleeding_skin
and bleeding_mucosal). Inconsistencies might appear
between these data collections.
The bleeding_other flag assumes that if a specific bleeding
subcategory already agrees with bleeding, then bleeding was
collected for that purpose and bleeding_other is set to False.
.. warning: Works with pd.NA but not with np.nan!
.. note: To create sample dataframe.
from itertools import product
v = [True, False, np.nan]
a = [v, v, v]
combos = pd.DataFrame(list(product(*a)))
combos = combos.convert_dtypes()
Parameters
----------
series: pd.Series
The series to correct
compound: pd.DataFrame
The elements to consider
Returns
-------
pd.Series
Examples
--------
# Correct compound feature bleeding (careful use pd.NA)
tidy.bleeding = \
compound_feature_correction(tidy.bleeding,
tidy[['bleeding_skin',
'bleeding_mucosal',
'bleeding_nose',
'bleeding_skin',
'bleeding_urine',
'bleeding_vaginal',
'bleeding_vensite']])
Equivalent:
bleeding = bleeding |
tidy.bleeding_gi | \
tidy.bleeding_gum | \
tidy.bleeding_mucosal | \
tidy.bleeding_nose | \
tidy.bleeding_skin | \
tidy.bleeding_urine | \
tidy.bleeding_vaginal | \
tidy.bleeding_vensite
"""
# Copy data
transform = series.copy(deep=True)
# Convert to dtypes
transform = transform.convert_dtypes()
# True if any of the subcategories is True (avoid shadowing the builtin any)
any_true = compound.convert_dtypes().any(axis=1)
# Set transform
transform = transform | any_true
# other = transform & ~any_true
# Return
return transform
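
# Descriptive note (added): with pandas nullable booleans (convert_dtypes),
# the OR above follows Kleene logic: True | NA is True, while False | NA
# stays NA, which is why the docstring warns that the correction works
# with pd.NA but not with np.nan.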
def unique_true_value_correction(series, value=np.nan, **kwargs):
"""Corrects more than one True appearance.
For example, for variable representing events such as
event_admission where only one value should be True
during the data collection period.
.. note: If len(series) <=1 return series
.. note: Set to value=np.nan or value=False
.. note: What if there is no true value?
.. note: Rename to one_true_value_correction
Examples
--------
tidy.event_admission = \
tidy.groupby(by=['StudyNo']) \
.event_admission \
.transform(unique_true_value_correction)
Parameters
----------
series: pd.Series
**kwargs:
Argument keep to pass to the pandas duplicated function. The
possible values are 'first', 'last' or False.
Returns
-------
"""
# Check series is of type bool
# No need to convert to boolean
# transform = series.apply(bool)
transform = series.copy(deep=True)
# There is no true value!
if transform.sum() == 0:
print("No value found!")
return series
# It is already unique
if transform.sum() == 1:
return series
# More than one
transform[transform.duplicated(**kwargs)] = value
# Return
return transform
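
# Illustrative note (added; made-up values): for event_admission =
# [True, False, True], the default keep='first' marks the second True as a
# duplicate, so with value=np.nan the corrected series is [True, False, nan].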
def date_corrections(x, years=None, use_swap_day_month=True):
"""Applies various possible date corrections
Parameters
----------
x:
The date to correct.
years:
Iterable of candidate years to try.
use_swap_day_month:
Whether to include the day/month swap as a candidate.
Returns
-------
"""
# Original value
corrections = [x]
# Swapping day month (optional)
if use_swap_day_month:
    corrections += [swap_day_month(x)]
corrections += [add_to_date(x, year=1)]
corrections += [add_to_date(x, year=-1)]
corrections += [add_to_date(x, month=1)]
corrections += [add_to_date(x, month=-1)]
# Range of possible years
if years is not None:
corrections += [x.replace(year=y) for y in years]
# Return
return pd.Series(pd.Series(corrections).unique())
def date_outliers_correction(series,
max_days_to_median=20,
outliers_as_nat=False):
"""
Corrects date outliers by replacing them with the closest plausible corrected date.
.. warning: The selection of the first column should not be
necessary. It should work just with the indx.
series[outliers] = r[idx].iloc[:, 0]
.. todo: Include different modes to compute the outliers
and different methods to correct the dates if
required:
outliers = np.abs(series - series.mean()) > coef * series.std()
outliers = np.abs(series - series.median()) > coef * series.std()
Parameters
----------
series
max_days_to_median
outliers_as_nat
Returns
-------
"""
# Compute days of difference between day and median
outliers = (series - series.median()) \
.dt.days.abs() > max_days_to_median
# Return original
if not outliers.any():
return series
# Unique years
years = series[~outliers].dt.year.unique()
# Compute various corrections
r = series[outliers].apply(\
date_corrections, years=years)
# Compute days
r_days = (r - series.median()).abs()
r_days = r_days / np.timedelta64(1, 'D')
# Date closer enough not found
if not (r_days < max_days_to_median).any(axis=1).any():
if outliers_as_nat:
transform = series.copy(deep=True)
transform[outliers] = pd.NaT
"""
print("------")
print(r_days)
print()
print(r)
print()
print(series.dt.normalize().median())
print()
print(series.dt.normalize().value_counts())
"""
# Find index with smaller days of difference
idx = (r - series.median()).abs().idxmin(axis=1)
# Replace in series
transform = series.copy(deep=True)
transform[outliers] = r[idx].iloc[:, 0]
# Return transformed
return transform
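
# Descriptive note (added): dates further than max_days_to_median days from
# the series median are treated as outliers; for each outlier,
# date_corrections builds candidate dates (day/month swap, +/- one year or
# month, and the years seen in the non-outlier dates) and the candidate
# closest to the median replaces the original value.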
def outlier_dates_correction(series, coef=2.0):
"""Corrects the dates that are outliers.
It receives all the dates in which samples were collected,
for example for a patient and tries to (i) identify
outliers and (ii) correct them with the best possible
date.
.. note: Using mean/std for outliers...
.. note: Should I use days which is more interpretable?
.. warning: Remember to include always the raw value
just in case that was the best! Should I
check only values that are outside
# repository: martinhoefling/smbprotocol
import pytest
from smbprotocol.security_descriptor import AccessAllowedAce, \
AccessDeniedAce, AceType, AclPacket, AclRevision, SDControl, SIDPacket, \
SMB2CreateSDBuffer, SystemAuditAce
class TestSIDPacket(object):
def test_create_message(self):
sid = "S-1-1-0"
message = SIDPacket()
message.from_string(sid)
expected = b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 12
assert actual == expected
assert str(message) == sid
def test_create_domain_sid(self):
sid = "S-1-5-21-3242954042-3778974373-1659123385-1104"
message = SIDPacket()
message.from_string(sid)
expected = b"\x01" \
b"\x05" \
b"\x00\x00" \
b"\x00\x00\x00\x05" \
b"\x15\x00\x00\x00" \
b"\x3a\x8d\x4b\xc1" \
b"\xa5\x92\x3e\xe1" \
b"\xb9\x36\xe4\x62" \
b"\x50\x04\x00\x00"
actual = message.pack()
assert len(message) == 28
assert actual == expected
assert str(message) == sid
def test_parse_string_fail_no_s(self):
sid = SIDPacket()
with pytest.raises(ValueError) as exc:
sid.from_string("A-1-1-0")
assert str(exc.value) == "A SID string must start with S-"
def test_parse_string_fail_too_small(self):
sid = SIDPacket()
with pytest.raises(ValueError) as exc:
sid.from_string("S-1")
assert str(exc.value) == "A SID string must start with S and contain" \
" a revision and identifier authority, e.g." \
" S-1-0"
def test_parse_message(self):
actual = SIDPacket()
data = b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual.unpack(data)
assert len(actual) == 12
assert str(actual) == "S-1-1-0"
assert actual['revision'].get_value() == 1
assert actual['sub_authority_count'].get_value() == 1
assert actual['reserved'].get_value() == 0
assert actual['identifier_authority'].get_value() == 1
sub_auth = actual['sub_authorities'].get_value()
assert isinstance(sub_auth, list)
assert len(sub_auth) == 1
assert sub_auth[0] == 0
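
# Descriptive note (added): the 12-byte blob above decodes as one revision
# byte, one sub_authority_count byte, two reserved bytes, a 4-byte identifier
# authority and a single 4-byte sub authority, which together render as the
# well-known "Everyone" SID S-1-1-0.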
def test_parse_message_domain_sid(self):
actual = SIDPacket()
data = b"\x01" \
b"\x05" \
b"\x00\x00" \
b"\x00\x00\x00\x05" \
b"\x15\x00\x00\x00" \
b"\x3a\x8d\x4b\xc1" \
b"\xa5\x92\x3e\xe1" \
b"\xb9\x36\xe4\x62" \
b"\x50\x04\x00\x00"
actual.unpack(data)
assert len(actual) == 28
assert str(actual) == "S-1-5-21-3242954042-3778974373-1659123385-1104"
assert actual['revision'].get_value() == 1
assert actual['sub_authority_count'].get_value() == 5
assert actual['reserved'].get_value() == 0
assert actual['identifier_authority'].get_value() == 5
sub_auth = actual['sub_authorities'].get_value()
assert isinstance(sub_auth, list)
assert len(sub_auth) == 5
assert sub_auth[0] == 21
assert sub_auth[1] == 3242954042
assert sub_auth[2] == 3778974373
assert sub_auth[3] == 1659123385
assert sub_auth[4] == 1104
class TestAccessAllowedAce(object):
def test_create_message(self):
sid = SIDPacket()
sid.from_string("S-1-1-0")
message = AccessAllowedAce()
message['mask'] = 2032127
message['sid'] = sid
expected = b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 20
assert actual == expected
def test_parse_message(self):
actual = AccessAllowedAce()
data = b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
data = actual.unpack(data)
assert len(actual) == 20
assert data == b""
assert actual['ace_type'].get_value() == \
AceType.ACCESS_ALLOWED_ACE_TYPE
assert actual['ace_flags'].get_value() == 0
assert actual['ace_size'].get_value() == 20
assert actual['mask'].get_value() == 2032127
assert str(actual['sid'].get_value()) == "S-1-1-0"
class TestAccessDeniedAce(object):
def test_create_message(self):
sid = SIDPacket()
sid.from_string("S-1-1-0")
message = AccessDeniedAce()
message['mask'] = 2032127
message['sid'] = sid
expected = b"\x01" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 20
assert actual == expected
def test_parse_message(self):
actual = AccessDeniedAce()
data = b"\x01" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
data = actual.unpack(data)
assert len(actual) == 20
assert data == b""
assert actual['ace_type'].get_value() == AceType.ACCESS_DENIED_ACE_TYPE
assert actual['ace_flags'].get_value() == 0
assert actual['ace_size'].get_value() == 20
assert actual['mask'].get_value() == 2032127
assert str(actual['sid'].get_value()) == "S-1-1-0"
class TestSystemAuditAce(object):
def test_create_message(self):
sid = SIDPacket()
sid.from_string("S-1-1-0")
message = SystemAuditAce()
message['mask'] = 2032127
message['sid'] = sid
expected = b"\x02" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 20
assert actual == expected
def test_parse_message(self):
actual = SystemAuditAce()
data = b"\x02" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
data = actual.unpack(data)
assert len(actual) == 20
assert data == b""
assert actual['ace_type'].get_value() == AceType.SYSTEM_AUDIT_ACE_TYPE
assert actual['ace_flags'].get_value() == 0
assert actual['ace_size'].get_value() == 20
assert actual['mask'].get_value() == 2032127
assert str(actual['sid'].get_value()) == "S-1-1-0"
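
# Descriptive note (added): the three ACE tests above share the same layout,
# ace_type (1 byte), ace_flags (1 byte), ace_size (2 bytes), access mask
# (4 bytes) and the SID; only the leading type byte differs, 0x00 for
# ACCESS_ALLOWED, 0x01 for ACCESS_DENIED and 0x02 for SYSTEM_AUDIT.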
class TestAclPacket(object):
def test_create_message(self):
sid1 = SIDPacket()
sid1.from_string("S-1-1-0")
sid2 = SIDPacket()
sid2.from_string("S-1-5-21-3242954042-3778974373-1659123385-1104")
ace1 = AccessAllowedAce()
ace1['mask'] = 2032127
ace1['sid'] = sid1
ace2 = AccessAllowedAce()
ace2['mask'] = 2032127
ace2['sid'] = sid2
# define an illegal ACE for tests to see if the packing is flexible
# enough for custom ACEs
ace3 = AccessAllowedAce()
ace3['ace_type'] = AceType.ACCESS_ALLOWED_OBJECT_ACE_TYPE
ace3['sid'] = sid1
message = AclPacket()
message['aces'] = [
ace1, ace2, ace3.pack()
]
expected = b"\x02" \
b"\x00" \
b"\x54\x00" \
b"\x03\x00" \
b"\x00\x00" \
b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00" \
b"\x00" \
b"\x00" \
b"\x24\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x05" \
b"\x00\x00" \
b"\x00\x00\x00\x05" \
b"\x15\x00\x00\x00" \
b"\x3a\x8d\x4b\xc1" \
b"\xa5\x92\x3e\xe1" \
b"\xb9\x36\xe4\x62" \
b"\x50\x04\x00\x00" \
b"\x05" \
b"\x00" \
b"\x14\x00" \
b"\x00\x00\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 84
assert actual == expected
def test_parse_message(self):
actual = AclPacket()
data = b"\x02" \
b"\x00" \
b"\x54\x00" \
b"\x03\x00" \
b"\x00\x00" \
b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00" \
b"\x00" \
b"\x00" \
b"\x24\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x05" \
b"\x00\x00" \
b"\x00\x00\x00\x05" \
b"\x15\x00\x00\x00" \
b"\x3a\x8d\x4b\xc1" \
b"\xa5\x92\x3e\xe1" \
b"\xb9\x36\xe4\x62" \
b"\x50\x04\x00\x00" \
b"\x05" \
b"\x00" \
b"\x14\x00" \
b"\x00\x00\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual.unpack(data)
assert len(actual) == 84
assert actual['acl_revision'].get_value() == AclRevision.ACL_REVISION
assert actual['sbz1'].get_value() == 0
assert actual['acl_size'].get_value() == 84
assert actual['ace_count'].get_value() == 3
assert actual['sbz2'].get_value() == 0
aces = actual['aces'].get_value()
assert len(aces) == 3
assert aces[0]['ace_type'].get_value() == \
AceType.ACCESS_ALLOWED_ACE_TYPE
assert aces[0]['ace_flags'].get_value() == 0
assert aces[0]['ace_size'].get_value() == 20
assert aces[0]['mask'].get_value() == 2032127
assert str(aces[0]['sid'].get_value()) == "S-1-1-0"
assert aces[1]['ace_type'].get_value() == \
AceType.ACCESS_ALLOWED_ACE_TYPE
assert aces[1]['ace_flags'].get_value() == 0
assert aces[1]['ace_size'].get_value() == 36
assert aces[1]['mask'].get_value() == 2032127
assert str(aces[1]['sid'].get_value()) == \
"S-1-5-21-3242954042-3778974373-1659123385-1104"
assert isinstance(aces[2], bytes)
assert aces[2] == b"\x05\x00\x14\x00\x00\x00\x00\x00" \
b"\x01\x01\x00\x00\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
class TestSMB2SDBuffer(object):
def test_create_message(self):
sid1 = SIDPacket()
sid1.from_string("S-1-1-0")
sid2 = SIDPacket()
sid2.from_string("S-1-5-21-3242954042-3778974373-1659123385-1104")
ace1 = AccessAllowedAce()
ace1['mask'] = 2032127
ace1['sid'] = sid1
ace2 = AccessAllowedAce()
ace2['mask'] = 2032127
ace2['sid'] = sid2
acl = AclPacket()
acl['aces'] = [
ace1, ace2
]
message = SMB2CreateSDBuffer()
message['control'].set_flag(SDControl.SELF_RELATIVE)
message.set_dacl(acl)
message.set_owner(sid2)
message.set_group(sid1)
message.set_sacl(None)
expected = b"\x01" \
b"\x00" \
b"\x04\x80" \
b"\x54\x00\x00\x00" \
b"\x70\x00\x00\x00" \
b"\x00\x00\x00\x00" \
b"\x14\x00\x00\x00" \
b"\x02" \
b"\x00" \
b"\x40\x00" \
b"\x02\x00" \
b"\x00\x00" \
b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00" \
b"\x00" \
b"\x00" \
b"\x24\x00" \
b"\xff\x01\x1f\x00" \
b"\x01" \
b"\x05" \
b"\x00\x00" \
b"\x00\x00\x00\x05" \
b"\x15\x00\x00\x00" \
b"\x3a\x8d\x4b\xc1" \
b"\xa5\x92\x3e\xe1" \
b"\xb9\x36\xe4\x62" \
b"\x50\x04\x00\x00" \
b"\x01\x05" \
b"\x00\x00" \
b"\x00\x00\x00\x05" \
b"\x15\x00\x00\x00" \
b"\x3a\x8d\x4b\xc1" \
b"\xa5\x92\x3e\xe1" \
b"\xb9\x36\xe4\x62" \
b"\x50\x04\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 124
assert actual == expected
def test_create_message_sacl_group(self):
sid = SIDPacket()
sid.from_string("S-1-1-0")
ace = AccessAllowedAce()
ace['sid'] = sid
acl = AclPacket()
acl['aces'] = [ace]
message = SMB2CreateSDBuffer()
message.set_dacl(None)
message.set_owner(None)
message.set_group(sid)
message.set_sacl(acl)
expected = b"\x01" \
b"\x00" \
b"\x10\x00" \
b"\x00\x00\x00\x00" \
b"\x14\x00\x00\x00" \
b"\x20\x00\x00\x00" \
b"\x00\x00\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00" \
b"\x02" \
b"\x00" \
b"\x1c\x00" \
b"\x01\x00" \
b"\x00\x00" \
b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\x00\x00\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual = message.pack()
assert len(message) == 60
assert actual == expected
def test_parse_message_sacl_group(self):
actual = SMB2CreateSDBuffer()
data = b"\x01" \
b"\x00" \
b"\x10\x00" \
b"\x00\x00\x00\x00" \
b"\x14\x00\x00\x00" \
b"\x20\x00\x00\x00" \
b"\x00\x00\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00" \
b"\x02" \
b"\x00" \
b"\x1c\x00" \
b"\x01\x00" \
b"\x00\x00" \
b"\x00" \
b"\x00" \
b"\x14\x00" \
b"\x00\x00\x00\x00" \
b"\x01" \
b"\x01" \
b"\x00\x00" \
b"\x00\x00\x00\x01" \
b"\x00\x00\x00\x00"
actual.unpack(data)
assert len(actual) == 60
assert actual['revision'].get_value() == 1
assert actual['sbz1'].get_value() == 0
assert actual['control'].get_value() == 16
assert actual['offset_owner'].get_value() == 0
assert actual['offset_group'].get_value() == 20
assert actual['offset_sacl'].get_value() == 32
assert actual['offset_dacl'].get_value() == 0
assert len(actual['buffer']) == 40
assert not actual.get_owner()
assert str(actual.get_group()) == "S-1-1-0"
sacl = actual.get_sacl()
assert sacl['acl_revision'].get_value() == AclRevision.ACL_REVISION
assert sacl['sbz1'].get_value() == 0
assert sacl['acl_size'].get_value() == 28
assert sacl['ace_count'].get_value() == 1
assert sacl['sbz2'].get_value() == 0
saces = sacl['aces'].get_value()
assert isinstance(saces, list)
assert len(saces) == 1
assert saces[0]['ace_type'].get_value() == \
AceType.ACCESS_ALLOWED_ACE_TYPE
assert
in the calculation array for a model
places the start position for each variable in model.allvar[variable]['startnr']
places the max start position in model.maxstart '''
if self.maxstart == 0:
variabler = (x for x in sorted(self.allvar.keys()))
start = 0
for v, m in ((v, self.allvar[v]['maxlag']) for v in variabler):
self.allvar[v]['startnr'] = start
start = start+(-int(m))+1
# print(v.ljust(self.maxnavlen),str(m).rjust(6),str(self.allvar[v]['start']).rju
self.maxstart = start
def make_gaussline(self, vx, nodamp=False):
''' takes a list of terms and translates it to a line in a Gauss-Seidel solver for
simultaneous models
the variables are mapped to positions in a vector which holds all relevant variables, lagged
this is in order to provide the opportunity to optimise data handling and solving
New version to take care of several lhs variables. Damping is not allowed for
this, but can easily be implemented by making a function to multiply tuples
'''
termer = self.allvar[vx]['terms']
assigpos = self.allvar[vx]['assigpos']
if nodamp:
ldamp = False
else:
# convention for damping equations
if 'Z' in self.allvar[vx]['frmlname'] or pt.kw_frml_name(self.allvar[vx]['frmlname'], 'DAMP'):
assert assigpos == 1, 'You can not dampen equations with several left hand sides:'+vx
endovar = [t.op if t.op else ('a['+str(self.allvar[t.var]['startnr'])+']')
for j, t in enumerate(termer) if j <= assigpos-1]
# to implemet dampning of solution
damp = '(1-alfa)*('+''.join(endovar)+')+alfa*('
ldamp = True
else:
ldamp = False
out = []
for i, t in enumerate(termer[:-1]): # drop the trailing $
if t.op:
out.append(t.op.lower())
if i == assigpos and ldamp:
out.append(damp)
if t.number:
out.append(t.number)
elif t.var:
lag = int(t.lag) if t.lag else 0
out.append('a['+str(self.allvar[t.var]['startnr']-lag)+']')
if ldamp:
out.append(')') # the last ) in the dampening
res = ''.join(out)
return res
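
# Illustrative sketch (added; hypothetical formula): for a damped equation
# FRML <Z> Y = C + I $ the generated line looks roughly like
#   a[<startnr Y>]=(1-alfa)*(a[<startnr Y>])+alfa*(a[<startnr C>]+a[<startnr I>])
# where each variable is addressed by its position in the solution vector a.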
def make_resline(self, vx):
''' takes a list of terms and translates it to a line calculating the result
'''
termer = self.allvar[vx]['terms']
assigpos = self.allvar[vx]['assigpos']
out = []
for i, t in enumerate(termer[:-1]): # drop the trailing $
if t.op:
out.append(t.op.lower())
if t.number:
out.append(t.number)
elif t.var:
lag = int(t.lag) if t.lag else 0
if i < assigpos:
out.append('b['+str(self.allvar[t.var]['startnr']-lag)+']')
else:
out.append('a['+str(self.allvar[t.var]['startnr']-lag)+']')
res = ''.join(out)
return res
def createstuff3(self, dfxx):
''' Connects a dataframe with the solution vector used by the iterative sim2 solver.
Returns functions to place data in the solution vector and to retrieve it again. '''
columsnr = {v: i for i, v in enumerate(dfxx.columns)}
pos0 = sorted([(self.allvar[var]['startnr']-lag, (var, lag, columsnr[var]))
for var in self.allvar for lag in range(0, -1+int(self.allvar[var]['maxlag']), -1)])
# if problems check if find_pos has been calculated
posrow = np.array([lag for (startpos, (var, lag, colpos)) in pos0])
poscol = np.array([colpos for (startpos, (var, lag, colpos)) in pos0])
poscolendo = [columsnr[var] for var in self.endogene]
posstartendo = [self.allvar[var]['startnr'] for var in self.endogene]
def stuff3(values, row, ljit=False):
'''Fills a calculating vector with data,
speeded up by using dataframe.values '''
if ljit:
# a = np.array(values[posrow+row,poscol],dtype=np.dtype('f8'))
# a = np.ascontiguousarray(values[posrow+row,poscol],dtype=np.dtype('f8'))
a = np.ascontiguousarray(
values[posrow+row, poscol], dtype=np.dtype('f8'))
else:
# a = values[posrow+row,poscol]
# a = np.array(values[posrow+row,poscol],dtype=np.dtype('f8'))
a = np.ascontiguousarray(
values[posrow+row, poscol], dtype=np.dtype('f8'))
return a
def saveeval3(values, row, vector):
values[row, poscolendo] = vector[posstartendo]
return stuff3, saveeval3
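
# Descriptive note (added): stuff3 copies, for one dataframe row, the current
# and lagged values of every variable into the flat solution vector a used by
# the generated solver, while saveeval3 writes the solved values of the
# endogenous variables from that vector back into the dataframe row.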
def outsolve(self, order='', exclude=[]):
''' returns a string with a function which calculates a
Gauss-Seidel iteration of a model
exclude is a list of endogenous variables not to be solved
uses:
model.solveorder the order in which the variables are calculated
model.allvar[v]["gauss"] the calculation
'''
short, long, longer = 4*' ', 8*' ', 12 * ' '
solveorder = order if order else self.solveorder
fib1 = ['def make(funks=[]):']
fib1.append(short + 'from modeluserfunk import ' +
(', '.join(pt.userfunk)).lower())
fib1.append(short + 'from modelBLfunk import ' +
(', '.join(pt.BLfunk)).lower())
funktext = [short+f.__name__ + ' = funks[' +
str(i)+']' for i, f in enumerate(self.funks)]
fib1.extend(funktext)
fib1.append(short + 'def los(a,alfa):')
f2 = (long + self.make_gaussline(v) for v in solveorder
if (v not in exclude) and (not self.allvar[v]['dropfrml']))
fib2 = [long + 'return a ']
fib2.append(short+'return los')
out = '\n'.join(chain(fib1, f2, fib2))
return out
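
# Illustrative sketch (added; the exact text depends on the model): the string
# returned above has the shape
#   def make(funks=[]):
#       from modeluserfunk import ...
#       from modelBLfunk import ...
#       def los(a,alfa):
#           a[3]=a[1]+a[2]
#           ...
#           return a
#       return los
# which is then exec'ed by make_solver to obtain the callable solver.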
def make_solver(self, ljit=False, order='', exclude=[], cache=False):
''' makes a function which performs a Gauss-Seidel iteration
if ljit=True a jitted function will also be created.
The functions will be placed in:
model.solve
model.solve_jit '''
a = self.outsolve(order, exclude) # find the text of the solve
exec(a, globals()) # make the factory defines
# using the factory create the function
self.solve = make(funks=self.funks)
if ljit:
print('Time for a cup of coffee')
self.solve_jit = jit(
"f8[:](f8[:],f8)", cache=cache, fastmath=True)(self.solve)
return
def base_sim(self, databank, start='', slut='', max_iterations=1, first_test=1, ljit=False, exclude=[], silent=False, new=False,
conv=[], samedata=True, dumpvar=[], ldumpvar=False,
dumpwith=15, dumpdecimal=5, lcython=False, setbase=False,
setlast=True, alfa=0.2, sim=True, absconv=0.01, relconv=0.00001,
debug=False, stats=False, **kwargs):
''' solves a model with data from a databank if the model has a solve function else it will be created.
The default options are reasonable for most use:
:start,slut: Start and end of simulation, default as much as possible taking max lag into account
:max_iterations : Max iterations
:first_test: First iteration where convergence is tested
:ljit: If True Numba is used to compile just in time - takes time but speeds solving up
:new: Force creation of a new version of the solver (for testing)
:exclude: Don't use these formulas
:silent: Suppress solving information
:conv: Variables on which to measure if convergence has been achieved
:samedata: If False force a remap of dataframe to solving vector (for testing)
:dumpvar: Variables to dump
:ldumpvar: toggles dumping of dumpvar
:dumpwith: width of dumps
:dumpdecimal: decimals in dumps
:lcython: Use Cython to compile the model (experimental)
:alfa: Damping of formulas marked for damping (<Z> in frml name)
:sim: For later use
:absconv: Threshold for applying relconv to test convergence
:relconv: Test for convergence
:debug: Output debug information
:stats: Output solving statistics
'''
sol_periode = self.smpl(start, slut, databank)
self.check_sim_smpl(databank)
self.findpos()
# fill all Missing value with 0.0
databank = insertModelVar(databank, self)
with self.timer('create stuffer and gauss lines ', debug) as t:
if (not hasattr(self, 'stuff3')) or (not self.eqcolumns(self.simcolumns, databank.columns)):
self.stuff3, self.saveeval3 = self.createstuff3(databank)
self.simcolumns = databank.columns.copy()
with self.timer('Create solver function', debug) as t:
if ljit:
if not hasattr(self, 'solve_jit'):
self.make_solver(ljit=True, exclude=exclude)
this_solve = self.solve_jit
else:
if not hasattr(self, 'solve'):
self.make_solver(exclude=exclude)
this_solve = self.solve
values = databank.values.copy()
# columsnr=self.get_columnsnr(databank)
ittotal = 0 # total iteration counter
# convvar = [conv] if isinstance(conv,str) else conv if conv != [] else list(self.endogene)
convvar = self.list_names(self.endogene, conv)
convplace = [self.allvar[c]['startnr']
for c in convvar] # this is how convergence is measured
if ldumpvar:
dump = convvar if dumpvar == [] else self.vlist(dumpvar)
dumpplac = [self.allvar[v]['startnr'] for v in dump]
dumphead = ' '.join(
[('{:>'+str(dumpwith)+'}').format(d) for d in dump])
starttime = time.time()
for periode in sol_periode:
row = databank.index.get_loc(periode)
with self.timer('stuffing', debug) as tt:
a = self.stuff3(values, row, ljit)
# b=self.stuff2(values,row,columsnr)
# assert all(a == b)
if ldumpvar:
print('\nStart solving', periode)
print(' '+dumphead)
print('Start '+' '.join(
[('{:>'+str(dumpwith)+',.'+str(dumpdecimal)+'f}').format(a[p]) for p in dumpplac]))
jjj = 0
for j in range(max_iterations):
jjj = j+1
if debug:
print('iteration :', j)
with self.timer('iteration '+str(jjj), debug) as tttt:
itbefore = a[convplace].copy()
a = this_solve(a, alfa)
if ldumpvar:
print('Iteration {:>3}'.format(
j)+' '.join([('{:>'+str(dumpwith)+',.'+str(dumpdecimal)+'f}').format(a[p]) for p in dumpplac]))
if j > first_test:
itafter = a[convplace].copy()
convergence = True
for after, before in zip(itafter, itbefore):
# print(before,after)
if before > absconv and abs(after-before)/abs(before) > relconv:
convergence = False
break
if convergence:
if not silent:
print(periode, 'Solved in ', j, 'iterations')
break
else:
itbefore = itafter.copy()
else:
print('No convergence ', periode, ' after', jjj, ' iterations')
with self.timer('saving', debug) as t:
# self.saveeval2(values,row,columsnr,a) # save the result
self.saveeval3(values, row, a) # save the result
ittotal = ittotal+jjj
if not silent:
print(self.name, ': Solving finish from ',
sol_periode[0], 'to', sol_periode[-1])
outdf = pd.DataFrame(values, index=databank.index,
columns=databank.columns)
if stats:
numberfloats = self.calculate_freq[-1][1]*ittotal
endtime = time.time()
simtime = endtime-starttime
print('{:<40}: {:>15,}'.format(
'Floating point operations :', self.calculate_freq[-1][1]))
print('{:<40}: {:>15,}'.format('Total iterations :', ittotal))
print('{:<40}: {:>15,}'.format(
'Total floating point operations', numberfloats))
print('{:<40}: {:>15,.2f}'.format(
'Simulation time (seconds) ', simtime))
if simtime > 0.0:
print('{:<40}: {:>15,.0f}'.format(
'Floating point operations per second', numberfloats/simtime))
# repository: seandong37tt4qu/jeszhengq
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description: Check database operation
"""
import math
from aops_database.proxy.proxy import ElasticsearchProxy
from aops_database.conf.constant import CHECK_RULE_INDEX, CHECK_RESULT_INDEX
from aops_database.function.helper import judge_return_code
from aops_utils.log.log import LOGGER
from aops_utils.restful.status import DATABASE_DELETE_ERROR, DATABASE_INSERT_ERROR,\
DATABASE_QUERY_ERROR, SUCCEED
class CheckDatabase(ElasticsearchProxy):
"""
Check related es operation
"""
def add_check_rule(self, data):
"""
Add check rule
Args:
data(dict): e.g.
{
"username": "admin",
"check_items: [
{
"check_item": "",
"data_list": [],
"condition": "",
"description": "",
"plugin": ""
}
]
}
Returns:
int: status code
dict
"""
username = data['username']
check_items = data['check_items']
result = {
"succeed_list": [],
"fail_list": [],
"update_list": []
}
query_body = self._general_body(data)
query_body['query']['bool']['must'].append(
{"match": {"check_item": ""}})
for check_item in check_items:
# query first, if the same check item exists, update and return True
if self._update_check_rule(check_item, query_body, result):
continue
# since it's a new check item, do insert
check_item['username'] = username
self._add_check_rule(check_item, result)
status_code = judge_return_code(result, DATABASE_INSERT_ERROR)
return status_code, result
def _update_check_rule(self, data, query_body, result):
"""
Update check rule accoring to the check item name
Args:
data(dict): check item
query_body(dict): query DSL
result(dict): record
Returns:
bool: return True if need updated
"""
item_name = data.get('check_item')
query_body['query']['bool']['must'][1]["match"]["check_item"] = item_name
res = self.query(CHECK_RULE_INDEX, query_body)
if res[0] and len(res[1]['hits']['hits']) > 0:
LOGGER.warning(
"check rule [%s] already exists, updating it", item_name)
_id = res[1]['hits']['hits'][0]['_id']
action = [{"_id": _id, "doc": data}]
res = self.update_bulk(CHECK_RULE_INDEX, action)
if res:
LOGGER.info("update check rule [%s] succeed", item_name)
result['update_list'].append(item_name)
else:
LOGGER.error("update check rule [%s] fail", item_name)
result['fail_list'].append(item_name)
return True
return False
def _add_check_rule(self, check_item, result):
"""
Insert check rule into database
Args:
check_item(dict): check item
result(dict): record
"""
item_name = check_item.get('check_item')
res = self.insert(CHECK_RULE_INDEX, check_item)
if res:
LOGGER.info("insert check rule [%s] succeed", item_name)
result['succeed_list'].append(item_name)
else:
LOGGER.error("insert check rule [%s] fail", item_name)
result['fail_list'].append(item_name)
def delete_check_rule(self, data):
"""
Delete check rule
Args:
data(dict): e.g.
{
"username": "admin",
"check_items": ["item1", "item2"]
}
Returns:
int: status code
"""
check_items = data.get('check_items')
body = self._general_body(data)
body["query"]["bool"]["must"].append(
{"terms": {"check_item": check_items}})
res = self.delete(CHECK_RULE_INDEX, body)
if res:
LOGGER.info("delete check rule %s succeed", check_items)
return SUCCEED
LOGGER.error("delete check rule %s fail", check_items)
return DATABASE_DELETE_ERROR
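
# Illustrative sketch (added): assuming _general_body adds the username match
# clause, the delete body built above for username "admin" and
# check_items ["item1", "item2"] is roughly
#   {"query": {"bool": {"must": [
#       {"match": {"username": "admin"}},
#       {"terms": {"check_item": ["item1", "item2"]}}]}}}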
def get_check_rule(self, data):
"""
Get check rule
Args:
data(dict): e.g.
{
"username": "admin"
"check_items": ["item1", "item2"],
"sort": "tree_name",
"direction": "asc",
"page": 1,
"per_page": 11
}
Returns:
int: status code
dict: result
"""
result = {
"total_count": 0,
"check_items": [],
"total_page": 0,
}
query_body = self._generate_query_rule_body(data)
status_code, total_count = self._get_rule_count(query_body)
if status_code != SUCCEED or total_count == 0:
return status_code, result
flag, total_page, res = self._query_or_scan(
CHECK_RULE_INDEX, query_body, total_count, data,
['check_item', 'data_list', 'condition', 'description', 'plugin'])
if res[0]:
LOGGER.debug("query check rule succeed")
result["total_count"] = total_count
result["total_page"] = total_page
# query by es scan
if flag:
result['check_items'] = res[1]
# query by es query
else:
for item in res[1]['hits']['hits']:
result["check_items"].append(item['_source'])
return SUCCEED, result
LOGGER.error("query check rule fail")
return DATABASE_QUERY_ERROR, result
def get_rule_count(self, data):
"""
Get check rule count
Args:
data(dict): e.g.
{
"username": "admin"
}
Returns:
int: status code
dict: result
"""
result = {
"rule_count": 0
}
query_body = self._generate_query_rule_body(data)
status_code, total_count = self._get_rule_count(query_body)
if total_count == 0:
LOGGER.warning("there is no matched check items")
result["rule_count"] = total_count
return status_code, result
def _generate_query_rule_body(self, data):
"""
Generate check rule query body
Args:
data(dict)
Returns:
dict: query body
"""
query_body = self._general_body(data)
check_items = data.get('check_items')
if check_items:
query_body["query"]["bool"]["must"].append(
{"terms": {"check_item": check_items}})
return query_body
def _get_rule_count(self, body):
"""
Get check rule count
Args:
body(dict): query body
Returns:
int: status code
int: result
"""
count_res = self.count(CHECK_RULE_INDEX, body)
if not count_res[0]:
LOGGER.error("query count of check rule fail")
return DATABASE_QUERY_ERROR, 0
return SUCCEED, count_res[1]
def save_check_result(self, data):
"""
Save check result
Args:
data(dict): e.g.
{
"check_results": [
{
"username": "admin"
"host_id": "host1",
"data_list": ["data1", "data2"],
"start": 1,
"end": 2,
"check_item": "item1",
"condition": "sxx",
"value": "xx"
}
]
}
Returns:
int: status code
"""
just_insert, need_update = self._split_results(data)
if all([self.insert_bulk(CHECK_RESULT_INDEX, just_insert),
self.update_bulk(CHECK_RESULT_INDEX, need_update)]):
LOGGER.debug("save or update check result succeed")
return SUCCEED
LOGGER.error("save or update check result fail")
return DATABASE_INSERT_ERROR
def _split_results(self, data):
"""
Judge whether the result needs to be inserted or updated
Args:
data(dict)
Returns:
list: data that need inserted
list: data that need updated
"""
check_results = data.get("check_results")
just_insert = []
need_update = []
for check_result in check_results:
query_body = {
"query": {
"bool": {
"must": [
{"match": {"username": check_result['username']}},
{"match": {"host_id": check_result['host_id']}},
{"match": {
"check_item": check_result['check_item']}},
{"match": {"start": check_result['start']}},
{"match": {"end": check_result['end']}}
]
}
}
}
res = self.query(CHECK_RESULT_INDEX, query_body)
if res[0] and len(res[1]['hits']['hits']) != 0:
LOGGER.debug("query check result succeed")
doc = {
"value": check_result["value"]
}
_id = res[1]['hits']['hits'][0]['_id']
need_update.append({"_id": _id, "doc": doc})
else:
just_insert.append(check_result)
return just_insert, need_update
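
# Descriptive note (added): a result is treated as an update only when a
# document with the same username, host_id, check_item, start and end already
# exists; in that case only its "value" field is refreshed, otherwise the
# whole result is inserted as a new document.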
def delete_check_result(self, data):
"""
Delete check result
Args:
data(dict): e.g.
{
"username": "admin",
"host_list": ["id1", "id2"],
"time_range": [111, 222]
}
Returns:
int: status code
"""
body = self._generate_delete_result_body(data)
res = self.delete(CHECK_RESULT_INDEX, body)
if res:
LOGGER.debug("delete check result succeed")
return SUCCEED
LOGGER.error("delete check result fail")
return DATABASE_DELETE_ERROR
def _generate_delete_result_body(self, data):
"""
Generate query body
Args:
data(dict)
Returns:
dict: query body
"""
host_list = data.get('host_list')
time_range = data.get('time_range')
check_items = data.get('check_items')
query_body = self._general_body(data)
if check_items:
query_body["query"]["bool"]["must"].append(
{"terms": {"check_item": check_items}})
if host_list:
query_body["query"]["bool"]["must"].append(
{"terms": {"host_id": host_list}})
if time_range and len(time_range) == 2:
query_body["query"]["bool"]["must"].extend(
[{"range": {
"start": {"gte": time_range[0]}
}
},
{"range": {
"end": {"lte": time_range[1]}
}
}
])
return query_body
def get_check_result(self, data):
"""
Get check result
Args:
data(dict): e.g.
{
"username": "admin",
"time_range": [1, 3],
"host_list": ['id1', 'id2'],
"check_items": ["item1"],
"sort": "check_item",
"direction": "asc",
"page": 1,
"per_page": 11
}
Returns:
int: status code
dict: result
"""
result = {
"total_page": 0,
"total_count": 0,
"check_result": []
}
query_body = self._generate_query_result_body(data)
count_res = self.count(CHECK_RESULT_INDEX, query_body)
if not count_res[0]:
LOGGER.error("query count of check result fail")
return DATABASE_QUERY_ERROR, result
if count_res[1] == 0:
LOGGER.warning("there is no matched check result")
return SUCCEED, result
total_count = count_res[1]
flag, total_page, res = self._query_or_scan(
CHECK_RESULT_INDEX, query_body, total_count, data,
['check_item', 'data_list', 'condition',
'value', 'description', 'host_id', 'start', 'end'])
if res[0]:
LOGGER.debug("query check result succeed")
result["total_count"] = total_count
result["total_page"] = total_page
# query by es scan
if flag:
result['check_result'] = res[1]
# query by es query
else:
for item in res[1]['hits']['hits']:
result["check_result"].append(item['_source'])
return SUCCEED, result
LOGGER.error("query check result fail")
return DATABASE_QUERY_ERROR, result
def _generate_query_result_body(self, data):
"""
Generate query body
Args:
data(dict)
Returns:
dict: query body
"""
host_list = data.get('host_list')
time_range = data.get('time_range')
check_items = data.get('check_items')
value = data.get('value')
query_body = self._general_body(data)
# only show abnormal
if value:
query_body["query"]["bool"]["must"].append(
{"match": {"value": value}})
if host_list:
query_body["query"]["bool"]["must"].append(
{"terms": {"host_id": host_list}})
if check_items:
query_body["query"]["bool"]["must"].append(
{"terms": {"check_item": check_items}})
if time_range and len(time_range) == 2:
query_body["query"]["bool"]["must"].extend(
[{"range": {
"start": {"lte": time_range[1]}
}
},
{"range": {
"end": {"gte": time_range[0]}
}
}
])
query_body["query"]["bool"]["should"] = [
{"range": {
"start": {"gte": time_range[0]}
}
},
{"range": {
"end": {"lte": time_range[1]}
}
}
]
return query_body
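
# Descriptive note (added): the two "range" clauses in the must list select
# results whose [start, end] interval overlaps the requested time_range;
# because a must clause is present, the extra should clauses only influence
# scoring, favouring results that lie entirely inside the requested range.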
def get_check_result_count(self, data):
"""
Get check result count
Args:
data(dict): e.g.
{
"username": "admin",
"host_list": ['id1', 'id2'],
"sort": "count",
"direction": "asc",
"page": 1,
"per_page": 11
}
Returns:
int: status code
dict: result
"""
result = {
"results": [],
"total_count": 0,
"total_page": 0,
}
query_body = self._generate_count_body(data)
res = self.query(CHECK_RESULT_INDEX, query_body)
if res[0]:
LOGGER.debug("query check result succeed")
total_count = len(res[1]['aggregations']['count']['buckets'])
page = data.get('page')
per_page = data.get("per_page")
start = 0
end = total_count
total_page = 1
if page and per_page:
total_page = math.ceil(total_count / per_page)
start = (page - 1) * per_page
end = min(start + per_page, total_count)
buckets = res[1]['aggregations']['count']['buckets'][start:end]
for bucket in buckets:
result['results'].append(
{"host_id": bucket['key'], "count": bucket['doc_count']})
result['total_count'] = total_count
result['total_page'] = total_page
return SUCCEED, result
LOGGER.error("query check result fail")
return DATABASE_QUERY_ERROR, result
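
# Descriptive note (added): the per-host counts come from an aggregation named
# "count" keyed by host_id (presumably a terms aggregation built in
# _generate_count_body below); pagination is done in Python by slicing the
# aggregation buckets rather than in Elasticsearch.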
def _generate_count_body(self, data):
"""
Generate result count
deprel: {word.deprel} :: misc: {word.misc}")
#the following are generated via a helper function so that different word attributes can be used (see function for more info)
child_val, tuple_child_val = get_comparison_attributes(word, sub_tuple[child_index], eric)
deprel_val, tuple_deprel_val = get_comparison_attributes(word, sub_tuple[deprel_index], eric, default="deprel")
#test_stuff.logger(f"{tab*5}vals: {child_val},{tuple_child_val}, {deprel_val}, {tuple_deprel_val}")
child_matched = child_val.lower() == tuple_child_val.lower()
deprel_matched = deprel_val.lower() == tuple_deprel_val.lower()
#just to not look up the mother if the match already failed
if child_matched and deprel_matched:
mother = get_mother(word, tree)
mother_val, tuple_mother_val = get_comparison_attributes(mother, sub_tuple[mother_index], eric)
mother_matched = True if mother_val.lower() == tuple_mother_val.lower() else False
else:
mother_matched = False
#if all three categories are a match, the subtuple is a match
if child_matched and deprel_matched and mother_matched:
used_words.append(word.id)
sub_tuple_correct = True
break #no need to match the other words. match next tuple instead
#if one of the sub_tuples is correct it's a match for the whole tuple, so no need to match the others
if sub_tuple_correct:
match_sub_tuples.append(sub_tuple)
tuple_correct = True
break
#if one tuple in a template does not match, the whole template does not match, so no need to go on
if not tuple_correct:
template_match = False
break
#collect all template matches
if template_match:
tmp = (d["id"], match_sub_tuples)
all_matches.append(tmp)
#returns a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples
#largest template tree will be element 0
if eric.prioritise_negation:
ret_val = prioritise_negation(all_matches)
else:
ret_val = sorted(all_matches, key=lambda item: len(item[1]), reverse=True)
return ret_val
#expects a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples
#that list should represent a ranking from most likely (lowest index) to least likely (highest index)
#it then goes through all templates and sorts them into templates that contain a lemma:not and those that do not
#then creates a ranking again for both, separately
#then both lists get concatenated with the negated tuples at the lower indices, so a short but negated template has priority over a longer, non-negated one
#returns that list
def prioritise_negation(templates_list):
negated_tuples = []
non_negated_tuples = []
for template in templates_list:
negated = False
for tpl in template[1]:
head = tpl[0]
child = tpl[2]
if isinstance(head, list):
if f"lemma{cd}not" in head or "not" in head:
negated = True
break
else:
if f"lemma{cd}not" == head or "not" == head:
negated = True
break
if isinstance(child, list):
if f"lemma{cd}not" in child or "not" in child:
negated = True
break
else:
if f"lemma{cd}not" == child or "not" == child:
negated = True
break
if negated:
negated_tuples.append(template)
else:
non_negated_tuples.append(template)
negated_tuples = sorted(negated_tuples, key=lambda item: len(item[1]), reverse=True)
non_negated_tuples = sorted(non_negated_tuples, key=lambda item: len(item[1]), reverse=True)
ranked_list = negated_tuples + non_negated_tuples
return ranked_list
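
# Illustrative note (added; hypothetical templates): given a matched
# three-tuple template with no negation and a matched two-tuple template whose
# child is the negation marker (f"lemma{cd}not" or "not"), prioritise_negation
# ranks the shorter negated template first, whereas the plain length-based
# ranking used otherwise would put the longer template first.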
#t is a tree like in tree_compare(t1, t2)
def dictionary_templates_test(tree):
#indices of tuples in templates
tmother = 0 #mother node
tdeprel = 1 #dependency relation
tchild = 2 #child node
root = ""
for x in tree.words:
if x.head == 0:
root = x
break
if not root:
test_stuff.logger("no root found:")
test_stuff.logger(tree.words)
#test_stuff.logger("Testing Tree:")
for d in nlp_dictionary:
test_stuff.logger(f"MATCHING TO {d['id']}")
if "depparse" not in d.keys():
continue
for dep_template in d["depparse"]:
correct_tupel_counter = 0 #if correct match, correct_tupel_counter should be equal to the number of elements in dep_template
#test_stuff.logger(f"\t\t template {template_counter}")
for tup in dep_template:
found_mother = False
found_child = False
found_deprel = False
#test_stuff.logger(f"\t\t\t{tup}")
child_is_list = True if isinstance(tup[tchild], list) else False
deprel_is_list = True if isinstance(tup[tdeprel], list) else False
if tup[tmother] == "root":
root_correct = False
if child_is_list:
if root.text in tup[tchild]:
root_correct = True
elif root.text == tup[tchild]:
root_correct = True
#else:
#test_stuff.logger(f"\t\t\t\t {root.text} != {tup[tmother]}")
if root_correct:
found_mother = True
found_child = True
found_deprel = True
else:
#see if you find current tuple in t
for word in tree.words:
#check if word is a child node
if child_is_list:
if word.text in tup[tchild]:
found_child = True
else:
if word.text == tup[tchild]:
found_child = True
#check if mother and deprel match
#mother is a dictionary, just like a word
mother = get_word(f"{word.head}", tree.words)
if isinstance(mother, str):
mother_text = mother
else:
mother_text = mother.text
found_mother = True
if mother_text == tup[tmother]:
#check if deprel matches
if deprel_is_list:
if word.deprel in tup[tdeprel]:
found_deprel = True
else:
if word.deprel == tup[tdeprel]:
found_deprel = True
if found_mother and found_deprel and found_child:
break
if found_mother and found_deprel and found_child:
#test_stuff.logger("\t\t\t\t\t Tupel correct!")
correct_tupel_counter += 1
if correct_tupel_counter == len(dep_template):
#test_stuff.logger(f"///Found match ({d['id']}): {dep_template}\n")
return f"///Found match: {dep_template}\n"
else:
#test_stuff.logger(f"NO MATCH. mother: {found_mother}, deprel: {found_deprel}, child: {found_child}")
'''
("root", "root", "predicted"),
("predicted", "nsubj:pass", f"upos{category_tag}NOUN")
'''
def sentence_similarity(sent1, sent2, pipeline):
t1 = pipeline(sent1).sentences[0]
t2 = pipeline(sent2).sentences[0]
total, percent = tree_compare(t1, t2)
return total, percent
def print_depparsed_sentences(sentences, language="en", pipeline=""):
if not pipeline:
pipeline = init_stanza(language)
if isinstance(sentences, str):
sentences = [sentences]
output, _ = depparse(sentences, pipeline)
for i, o in enumerate(output):
print(f"{i}: {o}")
def debug_depparsed_sentences_to_console():
pipeline = init_stanza("de")
eric = eric_nlp.Eric_nlp()
sentence_list = ["Used sentences:"]
print("Please provide input:")
while True:
# for usr_in in whiletrue:
usr_in = input()
if not usr_in:
print("no input given")
continue
elif usr_in.lower() in ["exit", "exit()", "quit", "quit()", "end", "end()"]:
break
sentence_list.append(usr_in)
preprocessed = eric.preprocessing(usr_in, "usr_input")
print(f"preprocessed: {preprocessed}")
out, _ = depparse([preprocessed], pipeline)
root = ""
for o in out:
if "id: 0" in o:
finder = "word: "
ender = "lemma: "
index = o.find(finder) + len(finder)
index_end = o.find(ender)
root = o[index:index_end].strip()
if not root:
root = "root not found"
print(f"Root: {root}")
for o in out[3:]:
print(o)
print("Goodbye")
for sent in sentence_list:
print(sent)
def main():
debug_depparsed_sentences_to_console()
quit()
input_language = "en"
stanza_pipeline = init_stanza(input_language)
eric = eric_nlp.Eric_nlp()
input_path = "data\\"
input_files = [f"{input_path}umfrage_input_{x}_cleaned.txt" for x in range(1,5)]
input_files.append(f"{input_path}manually_added.txt")
output_path = "output\\depparse\\data_analysis\\"
roots_out_file = f"{output_path}roots.csv"
input_accumulated = test_stuff.merge_input_files(input_files)#{x["id"]: x["key_sentences"] for x in nlp_dictionary}
input_accumulated = list(set(input_accumulated))
input_accumulated_as_dict = {}
for x in input_accumulated:
if x[0] in input_accumulated_as_dict.keys():
input_accumulated_as_dict[x[0]].append(x[1])
else:
input_accumulated_as_dict[x[0]] = [x[1]]
all_roots = dict() #keys are root words and the values are dicts where the keys are the function_id
for fct_id, unpreprocessed_sentences in input_accumulated_as_dict.items():
preprocessed_sentences = [eric.preprocessing(x, "usr_input") for x in unpreprocessed_sentences]
dep_output, roots = depparse(preprocessed_sentences, stanza_pipeline)
preface = [f"{v}: {k}" for k, v in roots.items()]
#extend all_roots
all_roots = extend_roots(all_roots, roots, fct_id)
all_output = ["Used Input:"] + input_files + ["\n"] + preface + dep_output
for o in all_output:
print(o)
create_roots_matrix(all_roots, roots_out_file, empty_cell="")
print(all_roots)
#for infi in input_files:
# input_data =
# test_input = [x[1] for x in test_stuff.read_input_from_file(f[0])]
# test_output = depparse("en", test_input)
# test_stuff.list_to_file(test_output, f[1])
def read_sentences_from_output(output_file):
stop_words = ["OUTPUT:", "Root:", "id:"]
file_lines = test_stuff.get_file_lines(output_file)
sentences = list()
for line in file_lines:
if line != "" and not line[0].isdigit() and line[0] != "=":
splitted = line.split()
if splitted[0] not in stop_words:
sentences.append(line)
return list(set(sentences))
'''
if you thought of new sentences while analysing the output, depparsed them over the debug console and included them in the output_file,
this function will help. It reads your originally used input again, then the output file, compares the sentences and stores all new ones, i.e. the manually analysed sentences, in a new input file.
It will then overwrite the output file to update the root counts.
'''
def update_depparse_output(input_files, output_file_overwrite, passed_fct_id, output_file_new_sentences="data\\manually_added.txt", sp=""):
#input_accumulated.extend([("why", "Why did you predict this outcome?"), ("why", "Why did you predict the outcome?")])
#1 get all three as dictionaries {passed_fct_id: list of sentences}
#1.1 originally used input
lines = test_stuff.merge_input_files(input_files)
lines = list(set(lines))
input_accumulated = convert_input_tuples_to_dict(lines)
#1.2 modified output
lines = read_sentences_from_output(output_file_overwrite)
output_accumulated = {passed_fct_id: lines}
#1.3 existing manually added sentences
lines = test_stuff.merge_input_files([output_file_new_sentences])
lines = list(set(lines))
manual_accumulated = convert_input_tuples_to_dict(lines)
#2 look for sentences in output_accumulated, that do not exist in input_accumulated and append these to manual_accumulated if they not already exist there
eric = eric_nlp.Eric_nlp()
for fct_id, sentences in output_accumulated.items():
if fct_id in input_accumulated.keys():
preprocessed_inputs = [eric.preprocessing(x, "usr_input") for x in input_accumulated[fct_id]]
for sent in sentences:
sentence = eric.preprocessing(sent, "usr_input")
if sentence not in
==> 1968532 (Roth -W)
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17544_S2_R1 17544_S2_R2 ../data/roth2016_control_set_plus_control.-100 17544_S2 > qjobs/qjob_2016-12-22_MiSeq_S2.sh # ==> 1541502 (Roth -A) from 2418986 (trimed seq) from xxx
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17545_S3_R1 17545_S3_R2 ../data/roth2016_control_set_plus_control.-100 17545_S3 > qjobs/qjob_2016-12-22_MiSeq_S3.sh # ==> 2190445 (Roth Seaprep -W)
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17546_S4_R1 17546_S4_R2 ../data/roth2016_control_set_plus_control.-100 17546_S4 > qjobs/qjob_2016-12-22_MiSeq_S4.sh # ==> 1877953 (Roth Seaprep -A)
#
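# Note (added): each invocation above follows the pattern
#   sh Y2H_Blastn.sh <run_folder> <R1_prefix> <R2_prefix> <reference_db.-100> <sample_name> > qjobs/<job_script>.sh
# and the number after "==>" records the read count obtained for that sample
# (some later lines also give the total read count and the mapped percentage);
# the -W/-A/-Q suffixes are assumed to denote the selection media used.
#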
### output/2017-02-04_MiSeq/Friedrich/ ## (not used because of contamination)
#
#
### output/2017-02-22_MiSeq/Friedrich/
#
# sh Y2H_Blastn.sh 2017-02-22_MiSeq S1_R1 S1_R2 ../data/roth2016_control_set_plus_control.-100 S1 > qjobs/qjob_2017-02-22_MiSeq_S1.sh # (Roth -W) # ==> 960245
# sh Y2H_Blastn.sh 2017-02-22_MiSeq S2_R1 S2_R2 ../data/roth2016_control_set_plus_control.-100 S2 > qjobs/qjob_2017-02-22_MiSeq_S2.sh # (Roth -A) # ==> 2024754
# sh Y2H_Blastn.sh 2017-02-22_MiSeq S3_R1 S3_R2 ../data/roth2016_control_set_plus_control.-100 S3 > qjobs/qjob_2017-02-22_MiSeq_S3.sh # (Roth -Q) # ==> 813912
#
### output/2017-03-03_MiSeq/Friedrich (Roth)
#
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S1_R1 S1_R2 ../data/roth2016_control_set_plus_control.-100 S1 > qjobs/qjob_2017-03-03_MiSeq_S1.sh # (Roth -W) # ==> 1414435
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S2_R1 S2_R2 ../data/roth2016_control_set_plus_control.-100 S2 > qjobs/qjob_2017-03-03_MiSeq_S2.sh # (Roth -A) # ==> 969159
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S3_R1 S3_R2 ../data/roth2016_control_set_plus_control.-100 S3 > qjobs/qjob_2017-03-03_MiSeq_S3.sh # (Roth -Q) # ==> 1255002
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S4_R1 S4_R2 ../data/roth2016_control_set_plus_control.-100 S4 > qjobs/qjob_2017-03-03_MiSeq_S4.sh # (Roth Aonly -W) to check toxicity # ==> 23
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S5_R1 S5_R2 ../data/roth2016_control_set_plus_control.-100 S5 > qjobs/qjob_2017-03-03_MiSeq_S5.sh # (Roth Bonly -W) to check toxicity # ==> 169
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S6_R1 S6_R2 ../data/roth2016_control_set_plus_control.-100 S6 > qjobs/qjob_2017-03-03_MiSeq_S6.sh # (Roth Bonly -A) to check auto-activation # ==> 1296
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S7_R1 S7_R2 ../data/roth2016_control_set_plus_control.-100 S7 > qjobs/qjob_2017-03-03_MiSeq_S7.sh # (Roth Seaprep -Q) # ==> 734766
#
# 2017-06-08_MiSeq (P170 toxicity test and Roth Seaprep)
#
# sh Y2H_Blastn.sh 2017-06-08_MiSeq S49_R1 S49_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S49 > qjobs/qjob_2017-06-08_MiSeq_S49.sh # (P170 Bonly -W) to check toxicity # ==> 9299
# sh Y2H_Blastn.sh 2017-06-08_MiSeq S50_R1 S50_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S50 > qjobs/qjob_2017-06-08_MiSeq_S50.sh # (P170 Bonly -W-A) to check auto-activation # ==> 7991
# sh Y2H_Blastn.sh 2017-06-08_MiSeq S51_R1 S51_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S51 > qjobs/qjob_2017-06-08_MiSeq_S51.sh # (P170 Aonly -W) to check toxicity # ==> 3965
# sh Y2H_Blastn.sh 2017-06-08_MiSeq S52_R1 S52_R2 ../data/roth2016_control_set_plus_control.-100 S52 > qjobs/qjob_2017-06-08_MiSeq_S52.sh # (Roth Seaprep -W) to check complexcity # ==> 2801344
#
#
# 2017-06-12_MiSeq (P170)
# sh Y2H_Blastn.sh 2017-06-12_MiSeq S53_R1 S53_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S53 > qjobs/qjob_2017-06-08_MiSeq_S53.sh # (P170 -W) no selection # ==> 3250858
# sh Y2H_Blastn.sh 2017-06-12_MiSeq S54_R1 S54_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S54 > qjobs/qjob_2017-06-08_MiSeq_S54.sh # (P170 -W-A) selection # ==> 3553164
#
### 2017-07-03_MiSeq : 60
# sh Y2H_Blastn.sh 2017-07-03_MiSeq 60_W_R1 60_W_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 60_W # ==> 2629509
# sh Y2H_Blastn.sh 2017-07-03_MiSeq 60_Q_R1 60_Q_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 60_Q # ==> 2507307
#
### 2017-07-03_MiSeq : 58
# sh Y2H_Blastn.sh 2017-07-03_MiSeq 58_AW_R1 58_AW_R2 ../data/E375pLT-20170706.-100 58_AW # ==> 219
# sh Y2H_Blastn.sh 2017-07-03_MiSeq 58_BW_R1 58_BW_R2 ../data/E375pLT-20170706.-100 58_BW # ==> 219
# sh Y2H_Blastn.sh 2017-07-03_MiSeq 58_BQ_R1 58_BQ_R2 ../data/E375pLT-20170706.-100 58_BQ" # ==> 306
#
# 2017-08-15_MiSeq (Test for Seaprep again)
# sh Y2H_Blastn.sh 2017-08-15_MiSeq S61_SWD_R1 S61_SWD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S61_SWD > qjobs/qjob_2017-08-15_MiSeq_S61_SWD.sh # SWD (P170 Seaprep -W) # ==> 2909222 out of 6012622 (48.4%)
# sh Y2H_Blastn.sh 2017-08-15_MiSeq S61_PWD_R1 S61_PWD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S61_PWD > qjobs/qjob_2017-08-15_MiSeq_S61_PWD.sh # SWD (P170 Plate -W) # ==> 1956131 out of 4473498 (43.7%)
# sh Y2H_Blastn.sh 2017-08-15_MiSeq S61_PQD_R1 S61_PQD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S61_PQD > qjobs/qjob_2017-08-15_MiSeq_S61_PQD.sh # SWD (P170 Plate -Q) # ==> 1684777 out of 3553019 (47.4%)
#
# 2017-08-22_MiSeq (Roth - Seaprep with selection)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SWD_S1_R1 S64-SWD_S1_R2 ../data/roth2016_control_set_plus_control.-100 S64_SWD > qjobs/qjob_2017-08-22_MiSeq_S64_SWD.sh # SWD (Roth Seaprep -W) # ==> 1491017 out of 2562522 (58.2%) ==> 1450453 (For New)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SA4D_S2_R1 S64-SA4D_S2_R2 ../data/roth2016_control_set_plus_control.-100 S64_SA4D > qjobs/qjob_2017-08-22_MiSeq_S64_SA4D.sh # SWD (Roth Seaprep +A1/4) # ==> 1177404 out of 2321645 (50.7%) ==> 1145754 (For New)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SA8D_S3_R1 S64-SA8D_S3_R2 ../data/roth2016_control_set_plus_control.-100 S64_SA8D > qjobs/qjob_2017-08-22_MiSeq_S64_SA8D.sh # SWD (Roth Seaprep +A1/8) # ==> 1125083 out of 2076630 (54.2%)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SQD_S4_R1 S64-SQD_S4_R2 ../data/roth2016_control_set_plus_control.-100 S64_SQD > qjobs/qjob_2017-08-22_MiSeq_S64_SQD.sh # SWD (Roth Seaprep -Q) # ==> 618177 out of 1437540 (43.0%)
#
# 2017-08-28_MiSeq (P170 - Seaprep)
# sh Y2H_Blastn.sh 2017-08-28_MiSeq S68_SWD_R1 S68_SWD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S68_SWD > qjobs/qjob_2017-08-28_MiSeq_S68_SWD.sh # SWD (P170 Seaprep -W) # 1999636
# sh Y2H_Blastn.sh 2017-08-28_MiSeq S68_SA4D_R1 S68_SA4D_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S68_SA4D > qjobs/qjob_2017-08-28_MiSeq_S68_SA4D.sh # SA4D (P170 Seaprep +A1/4) # 1943064
# sh Y2H_Blastn.sh 2017-08-28_MiSeq S68_SA8D_R1 S68_SA8D_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S68_SA8D > qjobs/qjob_2017-08-28_MiSeq_S68_SA8D.sh # SA8D (P170 Seaprep +A1/8) # 1372704
# sh Y2H_Blastn.sh 2017-08-28_MiSeq S68_SQD_R1 S68_SQD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S68_SQD > qjobs/qjob_2017-08-28_MiSeq_S68_SQD.sh # SQD (P170 Seaprep -Q) # 1247668
#
# 2017-08-30_MiSeq (P170 - Seaprep)
# sh Y2H_Blastn.sh 2017-08-30_MiSeq S65_SWD_R1 S65_SWD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S65_SWD > qjobs/qjob_2017-08-30_MiSeq_S65_SWD.sh # SWD (P170 Seaprep -W) # 1836282
# sh Y2H_Blastn.sh 2017-08-30_MiSeq S65_SAD_R1 S65_SAD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S65_SAD > qjobs/qjob_2017-08-30_MiSeq_S65_SAD.sh # SAD (P170 Seaprep +A?) # 2108754
# sh Y2H_Blastn.sh 2017-08-30_MiSeq S65_SQD_R1 S65_SQD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S65_SQD > qjobs/qjob_2017-08-30_MiSeq_S65_SQD.sh # SQD (P170 Seaprep -Q) # 1870105
#
# 2017-10-09_MiSeq (EGFR - 1)
# sh Y2H_Blastn.sh 2017-10-09_MiSeq S1_WD_R1 S1_WD_R2 ../data/EGFR_entry_clones.-100 S1_WD > qjobs/qjob_2017-10-09_MiSeq_S1_WD.sh # WD
# sh Y2H_Blastn.sh 2017-10-09_MiSeq S2_WD_R1 S2_WD_R2 ../data/EGFR_entry_clones.-100 S2_WD > qjobs/qjob_2017-10-09_MiSeq_S2_WD.sh
# sh Y2H_Blastn.sh 2017-10-09_MiSeq S3_A8D_R1 S3_A8D_R2 ../data/EGFR_entry_clones.-100 S3_A8D > qjobs/qjob_2017-10-09_MiSeq_S3_A8D.sh
# sh Y2H_Blastn.sh 2017-10-09_MiSeq S4_QD_R1 S4_QD_R2 ../data/EGFR_entry_clones.-100 S4_QD > qjobs/qjob_2017-10-09_MiSeq_S4_QD.sh
#
# sh Y2H_Blastn.sh 2017-10-16_MiSeq S1_WD_R1 S1_WD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S1_WD > qjobs/qjob_2017-10-16_MiSeq_S1_WD.sh # WD (P170 Seaprep -W)
# sh Y2H_Blastn.sh 2017-10-16_MiSeq S2_WD_R1 S2_WD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S2_WD > qjobs/qjob_2017-10-16_MiSeq_S2_WD.sh # WD (P170 Seaprep -W)
# sh Y2H_Blastn.sh 2017-10-16_MiSeq S3_AD_R1 S3_AD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S3_AD > qjobs/qjob_2017-10-16_MiSeq_S3_AD.sh # WD (P170 Seaprep -W)
# sh Y2H_Blastn.sh 2017-10-16_MiSeq S4_AD_R1 S4_AD_R2 ../data/P170_4_library_MGj5615-Jun-2017122014.-100 S4_AD > qjobs/qjob_2017-10-16_MiSeq_S4_AD.sh # WD (P170 Seaprep -W)
#
# 2017-10-30_MiSeq (R75 auto-activator)
# sh Y2H_Blastn.sh 2017-10-30_MiSeq S1_BWD_R1 S1_BWD_R2 ../data/roth2016_control_set_plus_control.-100 S1_BWD > qjobs/qjob_2017-10-30_MiSeq_S1_BWD.sh
# sh Y2H_Blastn.sh 2017-10-30_MiSeq S2_BA2D_R1 S2_BA2D_R2 ../data/roth2016_control_set_plus_control.-100 S2_BA2D > qjobs/qjob_2017-10-30_MiSeq_S2_BA2D.sh
# sh Y2H_Blastn.sh 2017-10-30_MiSeq S3_BQD_R1 S3_BQD_R2 ../data/roth2016_control_set_plus_control.-100 S3_BQD > qjobs/qjob_2017-10-30_MiSeq_S3_BQD.sh
# sh Y2H_Blastn.sh 2017-10-30_MiSeq S4_AWD_R1 S4_AWD_R2 ../data/roth2016_control_set_plus_control.-100 S4_AWD > qjobs/qjob_2017-10-30_MiSeq_S4_AWD.sh
#
# 2017-11-03_MiSeq (R75 technical repeat)
# sh Y2H_Blastn.sh 2017-11-03_MiSeq S1_W_R1 S1_W_R2 ../data/roth2016_control_set_plus_control.-100 S1_W > qjobs/qjob_2017-11-03_MiSeq_S1_W.sh
# sh Y2H_Blastn.sh 2017-11-03_MiSeq S2_Q_R1 S2_Q_R2 ../data/roth2016_control_set_plus_control.-100 S2_Q > qjobs/qjob_2017-11-03_MiSeq_S2_Q.sh
# sh Y2H_Blastn.sh 2017-11-03_MiSeq S3_W_R1 S3_W_R2 ../data/roth2016_control_set_plus_control.-100 S3_W > qjobs/qjob_2017-11-03_MiSeq_S3_W.sh
# sh Y2H_Blastn.sh 2017-11-03_MiSeq S4_Q_R1 S4_Q_R2 ../data/roth2016_control_set_plus_control.-100 S4_Q > qjobs/qjob_2017-11-03_MiSeq_S4_Q.sh
#
#===============================================================================
#===============================================================================
# Use all reference sequences to map
### 2016-12-22_MiSeq; Roth75-exp1; MGj46 ( - R75_41,WDR7 / - R75_72,PLEKHG7 )
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17543_S1_R1 17543_S1_R2 ../data/roth2016_control_set_plus_control 17543_S1 Blastn_All_Ref > qjobs/all.qjob_2016-12-22_MiSeq_S1.sh # ==> 1968532 (Roth -W)
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17544_S2_R1 17544_S2_R2 ../data/roth2016_control_set_plus_control 17544_S2 Blastn_All_Ref > qjobs/all.qjob_2016-12-22_MiSeq_S2.sh # ==> 1541502 (Roth -A) from 2418986 (trimmed seq) from xxx
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17545_S3_R1 17545_S3_R2 ../data/roth2016_control_set_plus_control 17545_S3 Blastn_All_Ref > qjobs/all.qjob_2016-12-22_MiSeq_S3.sh # ==> 2190445 (Roth Seaprep -W)
# sh Y2H_Blastn.sh 2016-12-22_MiSeq 17546_S4_R1 17546_S4_R2 ../data/roth2016_control_set_plus_control 17546_S4 Blastn_All_Ref > qjobs/all.qjob_2016-12-22_MiSeq_S4.sh # ==> 1877953 (Roth Seaprep -A)
### 2017-02-22_MiSeq; Roth75-exp2; R75_MGj51 ( - R75_41,WDR7 / - R75_72,PLEKHG7 / - AA )
# sh Y2H_Blastn.sh 2017-02-22_MiSeq S1_R1 S1_R2 ../data/roth2016_control_set_plus_control S1 Blastn_All_Ref > qjobs/all.qjob_2017-02-22_MiSeq_S1.sh # (Roth -W) # ==> 960245
# sh Y2H_Blastn.sh 2017-02-22_MiSeq S2_R1 S2_R2 ../data/roth2016_control_set_plus_control S2 Blastn_All_Ref > qjobs/all.qjob_2017-02-22_MiSeq_S2.sh # (Roth -A) # ==> 2024754
# sh Y2H_Blastn.sh 2017-02-22_MiSeq S3_R1 S3_R2 ../data/roth2016_control_set_plus_control S3 Blastn_All_Ref > qjobs/all.qjob_2017-02-22_MiSeq_S3.sh # (Roth -Q) # ==> 813912
### 2017-03-03_MiSeq; Roth75-exp3; [53] ( - R75_41,WDR7 / - R75_72,PLEKHG7 / - AA )
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S1_R1 S1_R2 ../data/roth2016_control_set_plus_control S1 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S1.sh # (Roth -W) # ==> 1414435
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S2_R1 S2_R2 ../data/roth2016_control_set_plus_control S2 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S2.sh # (Roth -A) # ==> 969159
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S3_R1 S3_R2 ../data/roth2016_control_set_plus_control S3 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S3.sh # (Roth -Q) # ==> 1255002
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S4_R1 S4_R2 ../data/roth2016_control_set_plus_control S4 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S4.sh # (Roth Aonly -W) to check toxicity # ==> 23
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S5_R1 S5_R2 ../data/roth2016_control_set_plus_control S5 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S5.sh # (Roth Bonly -W) to check toxicity # ==> 169
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S6_R1 S6_R2 ../data/roth2016_control_set_plus_control S6 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S6.sh # (Roth Bonly -A) to check auto-activation # ==> 1296
# sh Y2H_Blastn.sh 2017-03-03_MiSeq S7_R1 S7_R2 ../data/roth2016_control_set_plus_control S7 Blastn_All_Ref > qjobs/all.qjob_2017-03-03_MiSeq_S7.sh # (Roth Seaprep -Q) # ==> 734766
### 2017-08-22_MiSeq; Roth75-exp4 with Seaprep; [61]
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SWD_S1_R1 S64-SWD_S1_R2 ../data/roth2016_control_set_plus_control S64_SWD Blastn_All_Ref > qjobs/all.qjob_2017-08-22_MiSeq_S64_SWD.sh # SWD (Roth Seaprep -W) # ==> 1491017 out of 2562522 (58.2%) ==> 1450453 (For New)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SA4D_S2_R1 S64-SA4D_S2_R2 ../data/roth2016_control_set_plus_control S64_SA4D Blastn_All_Ref > qjobs/all.qjob_2017-08-22_MiSeq_S64_SA4D.sh # SWD (Roth Seaprep +A1/4) # ==> 1177404 out of 2321645 (50.7%) ==> 1145754 (For New)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SA8D_S3_R1 S64-SA8D_S3_R2 ../data/roth2016_control_set_plus_control S64_SA8D Blastn_All_Ref > qjobs/all.qjob_2017-08-22_MiSeq_S64_SA8D.sh # SWD (Roth Seaprep +A1/8) # ==> 1125083 out of 2076630 (54.2%)
# sh Y2H_Blastn.sh 2017-08-22_MiSeq S64-SQD_S4_R1 S64-SQD_S4_R2 ../data/roth2016_control_set_plus_control S64_SQD Blastn_All_Ref > qjobs/all.qjob_2017-08-22_MiSeq_S64_SQD.sh # SWD (Roth Seaprep -Q) # ==> 618177 out of 1437540 (43.0%)
### 2017-06-08_MiSeq; Roth75-exp5 with Seaprep only -W;
# sh Y2H_Blastn.sh 2017-06-08_MiSeq S52_R1 S52_R2 ../data/roth2016_control_set_plus_control S52 Blastn_All_Ref > qjobs/all.qjob_2017-06-08_MiSeq_S52.sh # (Roth Seaprep -W)
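#
# General invocation pattern inferred from the runs above (placeholder argument names are descriptive only):
#   sh Y2H_Blastn.sh <run_folder> <R1_prefix> <R2_prefix> <reference_prefix> <sample_name> [Blastn_All_Ref] > qjobs/<qjob_script>.sh
# The trailing "==> N" comments appear to record the number of mapped read pairs per sample
# (some entries also give the total, e.g. "N out of M (x%)").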
#!/usr/bin/env python
"""
Copyright (C) 2018 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.

SPDX-License-Identifier: Apache-2.0
"""
import subprocess
import time
import os
import local
import connection
from testlib.base import base_utils
import signal
class AdbError(Exception):
"""Error for adb connection issues"""
pass
class Adb(connection.Connection):
"""
Singleton object to facilitate adb connection with the device
Only one device per object
serial -- device serial
port -- the port for adb server running on the host
verbose -- if True print some extra messages to STDOUT
"""
__metaclass__ = base_utils.SingletonType
serial = None
port = None
adb = "adb"
cmd_prefix = []
verbose = False
local_conn = local.Local()
def __init__(self, **kwargs):
super(Adb, self).__init__()
if "port" in kwargs:
self.port = kwargs['port']
if self.port:
self.adb = "{0} -P {1}".format(self.adb, self.port)
os.environ['ANDROID_ADB_SERVER_PORT'] = self.port
if "serial" in kwargs:
self.serial = kwargs['serial']
if "verbose" in kwargs:
self.verbose = kwargs['verbose']
self.cmd_prefix = self.adb.split()
self.cmd_prefix.extend(["-s", self.serial])
def run_cmd(self, command, mode="sync", soutfile=None, dont_split=False, timeout=10, env={}, liveprint=True,
ignore_error=False, cmd_type=None):
"""run adb shell command"""
cmd = []
cmd.extend(self.cmd_prefix)
if cmd_type != "reboot":
cmd.append('shell')
if dont_split:
cmd.append(command)
else:
cmd.extend(command.split())
return self.run_cmd_linux(cmd, mode=mode, soutfile=soutfile, timeout=timeout, env=env, liveprint=liveprint,
ignore_error=ignore_error)
def run_cmd_linux(self, command, mode="sync", soutfile=None, timeout=10, env={}, liveprint=True,
ignore_error=False):
"""run linux bash command using Popen"""
if self.verbose:
print "Executing {0}".format(" ".join(command))
__env = os.environ
__env.update(env)
p = None
__err = 'Timeout {0} second(s) reached while executing "{1}"'.format(timeout, " ".join(command))
if soutfile is None:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=__env)
else:
p = subprocess.Popen(command, stdout=open(soutfile, "w"), stderr=subprocess.PIPE, env=__env)
if mode.lower() == "sync":
def handler(signum, frame):
raise base_utils.TimeoutError(__err)
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
while True:
if self.verbose and soutfile is None and liveprint:
print "STDOUT", p.stdout.read()
print "STDERR", p.stderr.read()
if p.poll() is not None:
if not ignore_error:
__error = p.stderr.read().strip()
if __error != '' and "Warning" not in __error:
signal.alarm(0)
raise AssertionError("Error encountered:\n{0}".format(__error))
signal.alarm(0)
return p
elif mode.lower() == "async":
# The lines below make run_cmd fail if the command returns with an error
# right at the start of execution
time.sleep(0.5)
if p.poll() not in [None, 0]:
if not ignore_error:
__error = p.stderr.read().strip()
# print __error
if __error != '' and "Warning" not in __error:
raise AssertionError(
"Error encountered:\n{0}".format(__error))
return p
else:
raise AdbError("Mode '{0}' not supported. \
Use only 'sync' or 'async'.".format(mode))
def open_connection(self):
"""connect to device if not already connected"""
if not self.check_connected():
cmd_string = "{0} connect {1}".format(self.adb, self.serial)
self.run_cmd_linux(cmd_string.split(), timeout=10)
time.sleep(1)
return self.check_connected()
def adb_root(self):
"""get adb root session"""
cmd_string = "{0} -s {1} root".format(self.adb, self.serial)
p = self.run_cmd_linux(cmd_string.split(), timeout=5)
if "adbd is already running as root" not in p.stdout.read():
time.sleep(5)
return self.open_connection()
def adb_remount(self):
"""remount /system and /vendor"""
cmd_string = "{0} -s {1} remount".format(self.adb, self.serial)
return self.run_cmd_linux(cmd_string.split(), timeout=10)
def adb_disable_verity(self):
"""Disable verity in order to write in /system partition"""
cmd_string = "{0} -s {1} disable-verity".format(self.adb, self.serial)
return self.run_cmd_linux(cmd_string.split(), timeout=20)
def kill_server(self):
"""kill adb server"""
cmd_string = "{0} kill-server".format(self.adb)
self.run_cmd_linux(cmd_string.split())
time.sleep(1)
def reboot_device(self, reboot_params="", ip_enabled=False, reboot_timeout=60):
"""reboot the device and check it is connected again"""
if self.verbose:
print "Rebooting .."
ip = self.serial.split(":")[0]
cmd = "reboot {0}".format(reboot_params)
reboot_proc = self.run_cmd(cmd, mode="sync", cmd_type="reboot")
if reboot_proc.poll() == 0:
if ip_enabled:
try:
self.local_conn.wait_for_no_ping(ip, timeout=reboot_timeout / 2)
except base_utils.TimeoutError:
return False
self.kill_server()
time.sleep(1)
try:
self.local_conn.wait_for_ping(ip, timeout=reboot_timeout)
except base_utils.TimeoutError:
return False
if reboot_params == "":
self.open_connection()
return True
else:
waiting = 0
while waiting < reboot_timeout:
time.sleep(2)
check = self.check_connected(device_state=reboot_params)
if check is None:
# timeout
return False
if check:
break
waiting += 2
return waiting < reboot_timeout
else:
return False
def check_connected(self, device_state=None):
"""check adb connection with the device"""
if device_state == "recovery":
return self.local_conn.check_adb(serial=self.serial,
device_state=device_state)
if device_state == "fastboot" or device_state == "bootloader":
return self.local_conn.check_fastboot(serial=self.serial)
try:
self.run_cmd("ls sdcard", timeout=20)
except base_utils.TimeoutError:
return None
except Exception:
return False
return True
def close_connection(self):
"""discovnnect from the device"""
cmd_string = "{0} disconnect {1}".format(self.adb, self.serial)
self.run_cmd_linux(cmd_string.split(), timeout=1)
def get_file(self, remote, local, timeout=60):
"""get file from the device"""
cmd = []
cmd.extend(self.cmd_prefix)
cmd_string = "pull {0} {1}".format(remote, local)
cmd.extend(cmd_string.split())
p = self.run_cmd_linux(cmd, timeout=timeout, ignore_error=True, liveprint=False)
err = p.stderr.read()
out = p.stdout.read()
assert "KB/s" in err or "100%" in out or (not err and not out), "Could not get file\n{0}".format(err)
def put_file(self, local, remote, timeout=60):
"""push file to device"""
cmd = []
cmd.extend(self.cmd_prefix)
cmd_string = "push {0} {1}".format(local, remote)
cmd.extend(cmd_string.split())
p = self.run_cmd_linux(cmd, timeout=timeout, ignore_error=True, liveprint=False)
err = p.stderr.read()
out = p.stdout.read()
assert "KB/s" in err or "100%" in out or (not err and not out), "Could not push file\n{0}".format(err)
def install_apk(self, apk, timeout=60):
"""install apk"""
cmd = ["shell", "settings", "get", "secure", "install_non_market_apps"]
cmd = self.cmd_prefix + cmd
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
(unknown_apps_state, err) = p.communicate()
unknown_apps_state = unknown_apps_state.strip()
cmd = ["shell", "settings", "get", "global", "package_verifier_enable"]
cmd = self.cmd_prefix + cmd
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
(package_verifier_state, err) = p.communicate()
package_verifier_state = package_verifier_state.strip()
cmd = ["shell", "settings", "put", "secure", "install_non_market_apps", "1"]
cmd = self.cmd_prefix + cmd
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
cmd = ["shell", "settings", "put", "global", "package_verifier_enable", "0"]
cmd = self.cmd_prefix + cmd
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
cmd = []
cmd.extend(self.cmd_prefix)
cmd_string = "install {0}".format(apk)
cmd.extend(cmd_string.split())
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
err = p.stderr.read()
out = p.stdout.read()
cmd = ["shell", "settings", "put", "secure", "install_non_market_apps", unknown_apps_state]
cmd = self.cmd_prefix + cmd
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
cmd = ["shell", "settings", "put", "global", "package_verifier_enable", package_verifier_state]
cmd = self.cmd_prefix + cmd
p = self.run_cmd_linux(cmd,
timeout=timeout,
ignore_error=True,
liveprint=False)
assert "Success" in out or "ALREADY_EXISTS" in out, "Could: not install apk {0}\Stdout: {1}\nStderr: {" \
"2}".format(apk, out, err)
def uninstall_apk(self, package, timeout=60):
"""install apk"""
cmd = []
cmd.extend(self.cmd_prefix)
cmd_string = "uninstall {0}".format(package)
cmd.extend(cmd_string.split())
p = self.run_cmd_linux(cmd, timeout=timeout, ignore_error=True, liveprint=False)
err = p.stderr.read()
out = p.stdout.read()
assert "Success" in out, "Could: not uninstall package {0}\nStdout: {1}\nStderr: {2}\n".format(package, out,
err)
def kill_command(self, pid):
self.run_cmd("kill {0}".format(pid))
def kill_all(self, pids):
for pid in pids:
self.kill_command(pid)
def cd(self, path):
raise NotImplementedError("Method not overwritten")
def set_env(self, var_name, var_value):
raise NotImplementedError("Method not overwritten")
def unset_env(self, var_name):
raise NotImplementedError("Method not overwritten")
def load_CPU(self):
"""
Loads the CPU by running several concurrent `cat /dev/urandom > /dev/null` readers;
returns the subprocess object.
"""
cmd = "cat /dev/urandom > /dev/null & \
cat /dev/urandom > /dev/null & cat /dev/urandom > /dev/null & \
cat /dev/urandom > /dev/null & cat /dev/urandom > /dev/null"
return self.run_cmd(cmd, mode="async")
def clear_logcat(self):
"""clears logcat"""
self.run_cmd("logcat -c")
def parse_cmd_output(self, cmd, grep_for=None, multiple_grep=None, left_separator=None, right_separator=None,
strip=False, dont_split=False, timeout=60, ignore_error=False):
"""
By default gets the output from adb shell command
Can grep for strings or cut for delimiters
"""
# tmp file name should be unique to allow getting output from
# multiple devices at the same time
tmp_file_name = "tmp_{0}_{1}_{2}".format("5037" if self.port is None else str(self.port), self.serial.split(
":")[0], str(int(round(time.time() * 1000000))))
self.run_cmd(cmd, soutfile=tmp_file_name, timeout=timeout, dont_split=dont_split, ignore_error=ignore_error)
with open(tmp_file_name, 'r') as f:
string = f.read()
string = base_utils.parse_string(string, grep_for=grep_for, multiple_grep=multiple_grep,
left_separator=left_separator, right_separator=right_separator,
strip=strip)
os.remove(tmp_file_name)
return string
def parse_logcat(self, grep_for=None, left_separator=None, right_separator=None, strip=False):
"""parses logcat output"""
cmd = "logcat -d"
return self.parse_cmd_output(cmd, grep_for=grep_for, left_separator=left_separator,
right_separator=right_separator, strip=strip)
def parse_dmesg(self, grep_for=None, left_separator=None, right_separator=None, strip=False):
"""parses dmesg output"""
cmd = "dmesg"
return self.parse_cmd_output(cmd, grep_for=grep_for, left_separator=left_separator,
right_separator=right_separator, strip=strip)
def parse_file(self, file_name, grep_for=None, left_separator=None, right_separator=None, strip=False):
"""parses the file located at file_name"""
cmd = "cat {0}".format(file_name)
return self.parse_cmd_output(cmd, grep_for=grep_for, left_separator=left_separator,
right_separator=right_separator, strip=strip)
def check_ping(self, ip):
"""checks ping to an ip from the device"""
cmd = "ping -c1 {0}".format(ip)
return "1 received" in self.parse_cmd_output(cmd)
def check_interface_up(self, interface):
"""
checks interface status from netcfg command
usage:
adb_conn.check_interface_up("wlan0")
"""
return "UP" in self.parse_cmd_output("netcfg", grep_for=interface)
def check_interface_down(self, interface):
"""
checks interface status from netcfg command
usage:
adb_conn.check_interface_down("wlan0")
"""
return "UP" not in self.parse_cmd_output("netcfg", grep_for=interface)
def check_interface_has_ip(self, interface):
"""
checks if interface has an IP address assigned
usage:
adb_conn.check_interface_has_ip("wlan0")
"""
output = self.parse_cmd_output("netcfg", grep_for=interface)
return
#table header
#loop through parameters provided and write to table with formatting
for l in range(numLay):
lay = str(l+1) #convert current layer to string
z = '%.3f' % sum(h[0:l+1]) #gets depth to layer through cumulative summation
VS, VP, PR, RHO, QS, QP = '%.3f' %vs[l], '%.3f' %vp[l], '%.3f' %sigma[l], '%.3f' %rho[l], '%.3f' %qs[l], '%.3f' %qp[l] #string conversion of layer values
if (l+1) == numLay: #for the final half-space layer
f.write(
'{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}\n'.format(
lay.ljust(10),
'HalfSpace'.ljust(10),
'N/A'.ljust(10),
VS.ljust(10),
VP.ljust(10),
PR.ljust(10),
RHO.ljust(10),
QS.ljust(10),
QP.ljust(10),
'0.000'.ljust(10),
'0.000'.ljust(10)
)
)
else: #for all layers except half-space
H = '%.3f' % h[l]
f.write(
'{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}\n'.format(
lay.ljust(10),
z.ljust(10),
H.ljust(10),
VS.ljust(10),
VP.ljust(10),
PR.ljust(10),
RHO.ljust(10),
QS.ljust(10),
QP.ljust(10),
'0.000'.ljust(10),
'0.000'.ljust(10)
)
)
f.write(note) #write note section
f.write(infoLines) #write history lines
for i in range(len(hist)): #loops through history lines and write
f.write(hist[i])
#close file
f.close()
return
def dcVelSum(data, minVel, maxVel, minFreq, maxFreq, X1, dx, dv=1, padLen=None, title='Summed Velocity'):
'''Takes either a gather file or an Obspy stream along with min/max velocity values to check
and min/max frequency values to check between; output is a plot showing summed DC amplitudes for all
frequencies at a given velocity value.
Shows (in theory) non-dispersive energy in the DC image
'''
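#The image is built with a phase-shift style transform: for each trial velocity v and frequency f the
#normalized spectra are phase-shifted and summed over offsets x,
#  V(f,v) = | sum_x G(x,f)/|G(x,f)| * exp(i*2*pi*f*x/v) |
#so energy stacks coherently when v matches the phase velocity at f.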
################
#Calculate Image
################
if isinstance(data, str):
#load in file to get raw data matrix and stream then extract parameters
gather, gatherStream = loadModelDAT(data)
params = getModelParams(gatherStream)
else:
#extract traces from stream and get acquisition parameters
gather = np.stack([t.data for t in data.traces])
params = getModelParams(data)
#pad time axis if specified
if padLen != None:
gather = np.pad(gather, ((0,0),(padLen,padLen)), 'constant', constant_values=(0,0))
#calculate offset vector
xx = np.arange(X1, X1 + params['numTrc']*dx, dx)
#compute fft and associated freqs
G = np.fft.fft(gather)
freqs = np.fft.fftfreq(gather[0].size, params['dt'])
#select only positive frequencies from fft output; i.e. first half
Gp = G[:,:freqs.size//2]
freqsp = freqs[:freqs.size//2]
#select frequencies
df = freqs[10]-freqs[9]
fMax = int(maxFreq/df)
fMin = int(minFreq/df)
#set up velocities to test
testVels = np.arange(minVel, maxVel, dv)
#create empty array to hold transformed data and mode picks
V = np.zeros((len(freqsp[fMin:fMax]), len(testVels)))
numF = V.shape[0]
######TRANSFORM
#run through freqs first
for f in range(len(freqsp[fMin:fMax])):
#then run through each test velocity
#print(freqsp[f+fMin])
for v in range(len(testVels)):
V[f,v] = np.abs( np.sum( Gp[:,f+fMin]/np.abs(Gp[:,f+fMin]) * np.exp(1j*2*np.pi*freqsp[f+fMin]*xx /testVels[v]) ) )
#normalize by the number of traces in the gather (as suggested by Olafsdottir 2018)
Vnorm = V/params['numTrc']
#calculate summation for all frequencies along single velocity value
vSum = Vnorm.sum(axis=0) / numF #normalize by number of frequency values
vSum_array = np.asarray([testVels, vSum])
#########
#PLOT
#########
#get min/max range of velocity values and appropriate step
tickStep = int(np.ceil(((maxVel - minVel)/20) / 10) * 10) #divide vel range into 20 and round to nearest 10
yTicks = np.arange(minVel, maxVel+tickStep, tickStep)
fig = plt.figure(figsize=(15,6))
gs = gridspec.GridSpec(1,2, width_ratios=[0.9,0.1], wspace=0.01)
vsAX = plt.subplot(gs[0,1])
vsAX.plot(vSum, testVels)
vsAX.set_xlabel('Amplitude Summation')
vsAX.set_yticks(yTicks)
vsAX.yaxis.tick_right()
vsAX.set_ylim(minVel, maxVel)
vsAX.grid(which='both')
vsAX.set_title('Vel. Sum.')
#DC image
dcAX = plt.subplot(gs[0,0])
dispC = dcAX.imshow(Vnorm.T, aspect='auto', interpolation='none', extent=[fMin*df,fMax*df,maxVel,minVel])
dcAX.invert_yaxis()
dcAX.set_xlabel('Frequency [Hz]')
dcAX.set_ylabel('Phase Velocity [m/s]')
dcAX.set_title('Dispersion Image')
dcAX.grid(which='both')
#fig.colorbar(dispC, ax = ax, shrink = 0.7)
#Plot title
supT = fig.suptitle(title, x=0.445, y=0.98, fontsize=14)
plt.show()
return
def dcVelSum_MODEL(modPath, fmin=2, fmax=100, vmin=50, vmax=1500, dv=1, dx=1.5, X1=9, padLen=1000):
#get DC folders, seismic paths, and LYR paths first
dcFolder, seisFiles, lyrFiles = getModelFolderPaths(modPath)
###
#MAIN LOOP THROUGH MODEL FILES
###
for dcF, seisF, lyrF in zip(dcFolder, seisFiles, lyrFiles):
#LOAD SEISMIC
modTrcs, modStrm = loadModelDAT(seisF)
#LOAD IN EXPERIMENTAL DC FILES
dcDict = loadTheorDC_PATH(dcF)
#PLOT
#get plot title
plotTitle = seisF.split('\\')[-1] #gets title for model
dcVelSum(modStrm, minVel=vmin, maxVel=vmax, minFreq=fmin, maxFreq=fmax, X1=X1, dx=dx,
dv=dv, padLen=padLen, title=plotTitle)
plt.show()
######################
#PLOT INV LAYER FILES
######################
def plotLYRFile_INV(invFile, originalFile=None, title='Vs Profile', ax=None, vBounds=None, zBounds=None, legendFS=10,
save=False, fs=None, **kwargs):
'''
Takes at minimum one layer file which is output from the inversion and plots it
Optional second argument is the original model layer file which can be plotted for comparison
'''
#parse kwargs
saveTitle = kwargs.get('saveTitle',None)
#take file and turn into dict if only one is passed
if isinstance(invFile, dict) == False:
fileDict = {'':invFile}
else:
fileDict = invFile
#load original file if passed
if originalFile != None:
origFileVals = readLYRFile(originalFile)
z_orig = origFileVals['Depth']
vs_orig = origFileVals['Vs']
vp_orig = origFileVals['Vp']
rho_orig = origFileVals['Density']
#PLOT
if fs != None: #set global font size for thesis plots
plt.rcParams.update({'font.size': fs}) #global font size
if ax != None:
ax = ax
msg = 'Master Plot'
else:
fig, ax = plt.subplots(1,1, figsize=(5,10))
msg = 'No Master Plot'
#plot original file first
if originalFile != None:
ax.plot(vs_orig, z_orig, c='k', lw=2, ls='--', label='Original')
#plot all files in dict
for fKey in fileDict:
#load layer files
invFileVals = readLYRFile(fileDict[fKey])
z_inv = invFileVals['Depth']
vs_inv = invFileVals['Vs']
vp_inv = invFileVals['Vp']
rho_inv = invFileVals['Density']
majorYLocator = MultipleLocator(1) #sets the major tick interval to every meter
majorYFormatter = FormatStrFormatter('%d')
minorYLocator = MultipleLocator(0.25) #sets the minor tick interval to every 0.25 meter
majorXLocator = MultipleLocator(100) #sets the major tick interval to every meter
majorXFormatter = FormatStrFormatter('%d')
minorXLocator = MultipleLocator(50) #sets the minor tick interval to every 0.25 meter
ax.plot(vs_inv, z_inv, lw=2, label='Inverted: \n %s' %fKey)
#format
ax.set_xlabel('Vs [m/s]')
ax.set_ylabel('Depth [m]')
ax.invert_yaxis()
#yticks
ax.yaxis.set_major_locator(majorYLocator)
ax.yaxis.set_major_formatter(majorYFormatter)
ax.yaxis.set_minor_locator(minorYLocator)
#xticks
ax.xaxis.set_major_locator(majorXLocator)
ax.xaxis.set_major_formatter(majorXFormatter)
ax.xaxis.set_minor_locator(minorXLocator)
ax.grid(which='both', linestyle='--', alpha=0.75)
ax.set_axisbelow(True)
ax.tick_params(axis='x', labelrotation=90)
ax.legend(fontsize=legendFS)
ax.set_title(title)
#apply x and y limits if specified
if vBounds != None:
vB = np.asarray(vBounds)
ax.set_xlim(vB[0], vB[1])
if zBounds != None:
zB = np.asarray(zBounds)
ax.set_ylim(zB[0], zB[1])
ax.invert_yaxis() #re invert after setting bounds
if msg == 'No Master Plot':
#fig.suptitle(title, y=0.92)
plt.subplots_adjust(wspace=0.05, hspace=0)
plt.show()
if save:
plt.savefig(saveTitle, dpi=100, bbox_inches='tight')
plt.show()
return
#################
#INV SUMMARY PLOT
#################
def plotINVSummary(seisFile, expDC, invLYRFile, invDC, oneD=True, origLYRFile=None, fmin=2, fmax=100,
vmin=50, vmax=1500, dv=0.1, dx=1.5, X1=9, vBounds=None, zBounds=None, save=False, fs=None):
'''
Takes at minimum an original seismic file, an extracted experimental DC, and an inverted model contained in a .LYR file and plots
a summary figure.
Optional input is an original .LYR for comparison, which can be either the known true model or the initial model for the inversion
INPUTS:
seisFile - .dat file (vertical comp.) containing seismic record that was inverted
expDC - the extracted fundamental mode file from the DC image generated from the provided seisFile
invDC - file pointing to the theoretical DC calculated from the inverted model
invLYRFile - .LYR file containing the resultant layered earth model generated during inversion
origLYRFile - the original or initial .LYR file containing the layered earth model
'''
#load traces and stream from provided seismic file
t, s = loadModelDAT(seisFile)
#load the extracted experimental DC curve provided into dict
DC_exp = loadTheorDC(expDC, asDict=True, dictKey='Experimental DC')
#if provided inverted DC is single file, load the modeled DC curve provided into dict
if isinstance(invDC, dict) == False:
DC_inv = loadTheorDC(invDC, asDict=True, dictKey='Inverted Model DC')
elif isinstance(invDC, dict):
DC_inv = invDC #store provided dict into var to be merged
#merge two DC into single dict for plotting
DC_plotDict = {**DC_exp, **DC_inv}
###
#PLOT SUMMARY FIG
###
if fs != None: #set global font size for thesis plots
plt.rcParams.update({'font.size': fs}) #global font size
fig = plt.figure(figsize=(14,5.5))
gs = gridspec.GridSpec(1,2, width_ratios=[0.2,0.9])
#get plot title
plotTitle = 'Inversion Summary for ' + seisFile.split('\\')[-1] #gets title for model
#LAYER PLOT
lyrAX = plt.subplot(gs[0,0])
plotLYRFile_INV(invFile=invLYRFile, originalFile=origLYRFile, vBounds=vBounds, zBounds=zBounds, ax=lyrAX)
if fs != None:
lyrAX.legend(fontsize=fs*.75) #super stinky way to get this done; quick and dirty
lyrAX.set_title('')
#DISP IMAGE PLOT
dcAX = plt.subplot(gs[0,1])
dcModelPhaseShift(s, minVel=vmin, maxVel=vmax, minFreq=fmin, dv=dv, maxFreq=fmax, padLen=1000,
X1=X1, dx=dx, overLayDC=DC_plotDict, ax=dcAX)
if fs != None:
dcAX.legend(fontsize=fs*.75) #super stinky way to get this done; quick and dirty
dcAX.set_title('')
if save == False:
fig.colorbar(dcAX.images[0], ax=dcAX, shrink=0.7)
#Set title and save
if save == False: supT = fig.suptitle(plotTitle, x=0.445, y=0.97, fontsize=14)
if save:
figTitle = 'INVsummary_' + seisFile.split('\\')[-1]
figTitle = figTitle.replace('.dat','')
#plt.savefig(figTitle, dpi=75, bbox_inches='tight', bbox_extra_artists=[supT])
plt.savefig(figTitle, dpi=100, bbox_inches='tight')
plt.show()
############
#PRINT INV SUMMARY
############
if isinstance(invLYRFile, dict) == False:
oneORtwo = oneD #if the inverted layer files are from a 1D or 2D inversion
printLYRfile(invLYRFile, inv=oneORtwo)
return
#################
#APPROX INV
#################
def approxINV(expDCFile, zConv=0.3, vsConv=0.92):
'''
Takes an experimental DC, calculates an approximate inversion, and then plots it.
Uses 0.3 of the wavelength as a depth proxy and 0.92 as the velocity proxy
'''
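#Conversions used below: wavelength lambda = c/f, approximate depth z = zConv*lambda,
#approximate shear velocity Vs = c/vsConv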
#load in the exp DC
dc = loadTheorDC(expDCFile, asDict=True, dictKey='Experimental DC')
#get frequency and phase velocity
f = dc['Experimental DC'][0]
c = dc['Experimental DC'][1]
#convert from frequency to wavelength
lamb = c/f
#calculate aprox depths and Vs values
z = lamb*zConv
vs = c/vsConv
######
#PLOT
######
fig, ax = plt.subplots(1,1, figsize=(6,10)) #w,h
majorYLocator = MultipleLocator(1) #sets the major tick interval to every meter
majorYFormatter = FormatStrFormatter('%d')
minorYLocator = MultipleLocator(0.25) #sets the minor tick interval to every 0.25 meter
majorXLocator = MultipleLocator(50) #sets the major tick interval to every meter
majorXFormatter = FormatStrFormatter('%d')
minorXLocator = MultipleLocator(25) #sets the minor tick interval to every 0.25 meter
ax.plot(vs, z, marker='o', fillstyle='none')
ax.set_xlabel('Vs [m/s]')
ax.set_ylabel('Depth [m]')
ax.invert_yaxis()
#yticks
ax.yaxis.set_major_locator(majorYLocator)
ax.yaxis.set_major_formatter(majorYFormatter)
ax.yaxis.set_minor_locator(minorYLocator)
#xticks
ax.xaxis.set_major_locator(majorXLocator)
ax.xaxis.set_major_formatter(majorXFormatter)
ax.xaxis.set_minor_locator(minorXLocator)
ax.grid(which='both', linestyle='--', alpha=0.75)
ax.tick_params(axis='x', labelrotation=90)
plt.show()
return
def experDC_quicklook(file):
'''
Takes an experimental DC curve file and plots a quick look overview
Includes a scatter plot with histograms showing value distribution,
a summary of the min and max values, and a quick inversion
'''
#extract experimental DC curve to dict
DC = loadTheorDC(fname=file, asDict=True, dictKey='Experimental DC')
DC_f, DC_v = DC['Experimental DC'][0], DC['Experimental DC'][1]
#seaborn joint plot
jp = sns.jointplot(DC_f, DC_v, space=0, marginal_kws=dict(bins=20), stat_func=None)
jp.set_axis_labels('Frequency [Hz]', 'Phase Velocity [m/s]')
plt.show()
#min max summary
print('The bounding velocities in the experimental DC are: ', np.nanmin(DC_v), np.nanmax(DC_v))
print('The bounding frequencies in the experimental DC are: ', np.nanmin(DC_f), np.nanmax(DC_f))
print('The minimum and maximum wavelengths are: ', (np.nanmin(DC_v)/np.nanmax(DC_f)), (np.nanmax(DC_v)/np.nanmin(DC_f)))
#plot quick inversion
approxINV(expDCFile=file, zConv=0.25)
return
def approxINV_2D(path, dx, zConv=0.3, vsConv=0.92, legend=True, returnDC=False, flip=False):
'''
Take a path to a folder containing experimental DC for a 2D profile and
plots them as an approximate inversion
path - path to extracted dispersion curves
dx - spacing between each measurement
zConv - wavelength conversion for depth
vsConv - relation between measured phase velocity and shear velocity
**********NOTE************
THIS ASSUMES SHOTS ARE NAMED IN SEQUENCE WITHIN THE FOLDER WITH
THEIR RELATIVE POSITION AT THE END OF THE FILE NAME
eg:
Line01_Shot01(Model).DC
Line01_Shot02(Model).DC
...
etc.
'''
##########
#LOAD THE EXP. DC'S
##########
expDCs = {}
for root, dirs,
output_format == "Phase":
return torch.atan2(
-spec_imag + 0.0, spec_real
) # +0.0 removes -0.0 elements, which leads to error in calculating phase
def inverse(self, X, onesided=True, length=None, refresh_win=True):
"""
This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,
which is to convert spectrograms back to waveforms.
It only works for the complex value spectrograms. If you have the magnitude spectrograms,
please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
Parameters
----------
onesided : bool
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
length : int
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
refresh_win : bool
Recalculating the window sum square. If you have an input with fixed number of timesteps,
you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``
"""
if (hasattr(self, "kernel_sin_inv") != True) or (
hasattr(self, "kernel_cos_inv") != True
):
raise NameError(
"Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`"
)
assert X.dim() == 4, (
"Inverse iSTFT only works for complex number,"
"make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)."
"\nIf you have a magnitude spectrogram, please consider using Griffin-Lim."
)
return self.inverse_stft(
X, self.kernel_cos_inv, self.kernel_sin_inv, onesided, length, refresh_win
)
def extra_repr(self) -> str:
return "n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}".format(
self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
)
class iSTFT(STFTBase):
"""This class is to convert spectrograms back to waveforms. It only works for the complex value spectrograms.
If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse.
If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class
to save GPU/RAM memory.
When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please
use with extra care.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
Please make sure the value is the same as the forward STFT.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
Please make sure the value is the same as the forward STFT.
center : bool
Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable_kernels : bool
Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``.
trainable_window : bool
Determine if the window function is trainable or not.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
Returns
-------
waveform : torch.tensor
It returns a batch of waveforms.
Examples
--------
>>> spec_layer = Spectrogram.iSTFT()
>>> specs = spec_layer(x)
"""
def __init__(
self,
n_fft=2048,
win_length=None,
freq_bins=None,
hop_length=None,
window="hann",
freq_scale="no",
center=True,
fmin=50,
fmax=6000,
sr=22050,
trainable_kernels=False,
trainable_window=False,
verbose=True,
refresh_win=True,
):
super().__init__()
# Trying to make the default setting same as librosa
if win_length == None:
win_length = n_fft
if hop_length == None:
hop_length = int(win_length // 4)
self.n_fft = n_fft
self.win_length = win_length
self.stride = hop_length
self.center = center
self.pad_amount = self.n_fft // 2
self.refresh_win = refresh_win
start = time()
# Create the window function and prepare the shape for batch-wise-time-wise multiplication
# Create filter windows for inverse
kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(
n_fft,
win_length=win_length,
freq_bins=n_fft,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=False,
)
window_mask = get_window(window, int(win_length), fftbins=True)
# For inverse, the Fourier kernels do not need to be windowed
window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)
# kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)
# Decide if the Fourier kernels are trainable
if trainable_kernels:
# Making all these variables trainable
kernel_sin = nn.Parameter(kernel_sin, requires_grad=trainable_kernels)
kernel_cos = nn.Parameter(kernel_cos, requires_grad=trainable_kernels)
self.register_parameter("kernel_sin", kernel_sin)
self.register_parameter("kernel_cos", kernel_cos)
else:
self.register_buffer("kernel_sin", kernel_sin)
self.register_buffer("kernel_cos", kernel_cos)
# Decide if the window function is trainable
if trainable_window:
window_mask = nn.Parameter(window_mask, requires_grad=trainable_window)
self.register_parameter("window_mask", window_mask)
else:
self.register_buffer("window_mask", window_mask)
if verbose == True:
print(
"iSTFT kernels created, time used = {:.4f} seconds".format(
time() - start
)
)
else:
pass
def forward(self, X, onesided=False, length=None, refresh_win=None):
"""
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
If your input spectrograms X are of the same length, please use ``refresh_win=None`` to increase
computational speed.
"""
if refresh_win == None:
refresh_win = self.refresh_win
assert X.dim() == 4, (
"Inverse iSTFT only works for complex number,"
"make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)"
)
return self.inverse_stft(
X, self.kernel_cos, self.kernel_sin, onesided, length, refresh_win
)
if __name__ == "__main__":
# in Installation dir, python -m nnAudio.features.stft
import soundfile as sf
y, sr = sf.read('piano.wav')
y = torch.tensor(y, device='cpu').float().reshape(1, -1)
spec_layer = STFT(n_fft=1024, win_length=1024, freq_bins=None, hop_length=256,
window='hanning', center=True, pad_mode='constant',
sr=sr, output_format='Complex', iSTFT=True)
spec = spec_layer(y) # (1, 513, 663, 2)
y_hat = spec_layer.inverse(spec, length = y.shape[-1])
sf.write('piano_gen.wav', y_hat.numpy()[0], sr, 'PCM_16')
# Inverse STFT was done!
print(torch.mean((y - y_hat)**2)) # tensor(1.8935e-15)
# check with the result of SMS tool
absX = torch.sqrt(spec[..., 0].pow(2) + spec[..., 1].pow(2))
mX = 20 * torch.log10(absX).numpy()
print(mX[0].T)
pX = torch.atan2(spec[..., 0], -spec[..., 1])
pX = np.unwrap(pX.numpy(), axis = 1)
pX_diff = np.diff(pX, axis = 1)
print(pX_diff[0].T)
np.save('pX_diff.npy', pX_diff[0].T)
'''
This code outputs:
mX
[[ -54.44288 -56.47573 -62.03878 ... -107.534065 -108.65582
-109.02787 ]
[ -49.09628 -53.011593 -68.65705 ... -117.8772 -113.73474
-118.49428 ]
[ -48.05722 -54.618782 -60.830635 ... -115.31365 -119.57239
-128.78262 ]
...
[ -48.83616 -54.508442 -61.27616 ... -126.33887 -113.718346
-109.300835]
[ -50.005646 -55.09293 -62.532623 ... -128.21506 -116.32202
-107.124954]
[ -54.041245 -57.00664 -67.318436 ... -109.26711 -106.14801
-103.48395 ]]
pX
[[-8.80199075e-01 -5.75353384e-01 -7.70502090e-02 ... -7.30743408e-02
9.79537964e-02 1.27990723e-01]
[-1.82757258e-01 -1.26417732e+00 -1.81887794e+00 ... 2.06545258e+00
1.29386139e+00 1.93171692e+00]
[ 2.03311443e-03 2.60517168e+00 -4.74282444e-01 ... -1.02615356e-02
-2.84988403e-01 -1.46266937e+00]
...
[ 1.48018599e-01 1.26699878e+00 1.28380056e-01 ... 2.35776854e+00
1.00008821e+00 -8.13722610e-02]
[ 8.07777643e-02 -8.76696825e-01 1.56771421e-01 ... 2.26985931e+00
2.31286240e+00 -3.90001297e-01]
[ 7.64261723e-01 1.22351828e+00 1.98449704e+00
"long-short-exit":
self.plt_3.addItem(self.scatter_exit_pred_long)
self.plt_3.addItem(self.scatter_exit_pred_short)
self.plt_3.addItem(self.scatter_exit_gain_pred)
self.plt_3.addItem(self.scatter_exit_stop_pred)
self.plt_3.addItem(self.x_line_plt3, ignoreBounds=True)
self.plt_3.addItem(self.y_line_plt3, ignoreBounds=True)
self.plt_1.enableAutoRange()
self.plt_2.enableAutoRange()
self.plt_3.enableAutoRange()
def mouse_event(self, evt):
if self.plt_1.sceneBoundingRect().contains(evt[0]):
mousePoint_1 = self.plt_1.vb.mapSceneToView(evt[0])
self.x_line_plt1.setPos(mousePoint_1.x())
self.y_line_plt1.setPos(mousePoint_1.y())
self.x_line_plt1.show()
self.y_line_plt1.show()
else:
self.x_line_plt1.hide()
self.y_line_plt1.hide()
if self.plt_2.sceneBoundingRect().contains(evt[0]):
mousePoint_2 = self.plt_2.vb.mapSceneToView(evt[0])
self.x_line_plt2.setPos(mousePoint_2.x())
self.y_line_plt2.setPos(mousePoint_2.y())
self.x_line_plt2.show()
self.y_line_plt2.show()
else:
self.x_line_plt2.hide()
self.y_line_plt2.hide()
if self.plt_3.sceneBoundingRect().contains(evt[0]):
mousePoint_3 = self.plt_3.vb.mapSceneToView(evt[0])
self.x_line_plt3.setPos(mousePoint_3.x())
self.y_line_plt3.setPos(mousePoint_3.y())
self.x_line_plt3.show()
self.y_line_plt3.show()
else:
self.x_line_plt3.hide()
self.y_line_plt3.hide()
def show_scatter(self, df_1):
sig_ref = self.df_1.c
size_1 = 9
size_2 = 8
if self.showplt5 == 2 and self.showplt4 == 1:
self.showplt5 = 1
sig_ref = self.equity_curve_pred
size_1 = 4
size_2 = 4
elif self.showplt5 == 2 and self.showplt4 == 2:
self.showplt5 = 1
sig_ref = self.cumulative_amount_curve_str
size_1 = 4
size_2 = 4
if self.logic == "long":
# Scatter1 Long y_true
self.scatter_long_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == 1],
sig_ref[self.df_1.signals_true_scatter == 1],
size=size_1, pen='#00FF7F', symbol='t1', brush='#008B00')
# Scatter1 Exit-long y_true
self.scatter_short_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == -2],
sig_ref[self.df_1.signals_true_scatter == -2],
size=size_1, pen='#FF0000', symbol='t', brush='#1E1E1E')
# Scatter2 Long y_pred
self.scatter_long_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == 1],
sig_ref[self.df_1.signals_pred_scatter == 1],
size=size_1, pen='#00FF7F', symbol='t1', brush='#008B00')
# Scatter2 Exit-Long y_pred
self.scatter_short_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == -2],
sig_ref[self.df_1.signals_pred_scatter == -2],
size=size_1, pen='#FF0000', symbol='t', brush='#1E1E1E')
elif self.logic == "short":
# Scatter1 Exit-Sort y_true
self.scatter_long_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == 2],
sig_ref[self.df_1.signals_true_scatter == 2],
size=size_1, pen='#00FF7F', symbol='t1', brush='#1E1E1E')
# Scatter1 Short y_true
self.scatter_short_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == -1],
sig_ref[self.df_1.signals_true_scatter == -1],
size=size_1, pen='#FF0000', symbol='t', brush='#8B0000')
# Scatter2 Exit-Short y_pred
self.scatter_long_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == 2],
sig_ref[self.df_1.signals_pred_scatter == 2],
size=size_1, pen='#00FF7F', symbol='t1', brush='#1E1E1E')
# Scatter2 Short y_pred
self.scatter_short_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == -1],
sig_ref[self.df_1.signals_pred_scatter == -1],
size=size_1, pen='#FF0000', symbol='t', brush='#8B0000')
else:
# Scatter1 Long y_true
self.scatter_long_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == 1],
sig_ref[self.df_1.signals_true_scatter == 1],
size=size_1, pen='#00FF7F', symbol='t1', brush='#008B00')
# Scatter1 Short y_true
self.scatter_short_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == -1],
sig_ref[self.df_1.signals_true_scatter == -1],
size=size_1, pen='#FF0000', symbol='t', brush='#8B0000')
# Scatter2 Long y_pred
self.scatter_long_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == 1],
sig_ref[self.df_1.signals_pred_scatter == 1],
size=size_1, pen='#00FF7F', symbol='t1', brush='#008B00')
# Scatter2 Short y_pred
self.scatter_short_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == -1],
sig_ref[self.df_1.signals_pred_scatter == -1],
size=size_1, pen='#FF0000', symbol='t', brush='#8B0000')
# Scatter1 Exit y_true Long
self.scatter_exit_true_long = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == -2],
sig_ref[self.df_1.signals_true_scatter == -2],
size=size_1, pen='#FF0000', symbol='t', brush='#1E1E1E')
# Scatter1 Exit y_true Short
self.scatter_exit_true_short = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == 2],
sig_ref[self.df_1.signals_true_scatter == 2],
size=size_1, pen='#00FF7F', symbol='t1', brush='#1E1E1E')
# Scatter1 Exit y_pred Long
self.scatter_exit_pred_long = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == -2],
sig_ref[self.df_1.signals_pred_scatter == -2],
size=size_1, pen='#FF0000', symbol='t', brush='#1E1E1E')
# Scatter1 Exit y_pred Short
self.scatter_exit_pred_short = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == 2],
sig_ref[self.df_1.signals_pred_scatter == 2],
size=size_1, pen='#00FF7F', symbol='t1', brush='#1E1E1E')
# Scatter exit-gain y_true
self.scatter_exit_gain_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == 4],
sig_ref[self.df_1.signals_true_scatter == 4],
size=size_2, pen='#FFFF00', symbol='+', brush='#FFFF00')
# Scatter exit-stop y_true
self.scatter_exit_stop_true = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_true_scatter == -4],
sig_ref[self.df_1.signals_true_scatter == -4],
size=size_2, pen='#FF00FF', symbol='x', brush='#1E1E1E')
# Scatter exit-gain y_pred
self.scatter_exit_gain_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == 4],
sig_ref[self.df_1.signals_pred_scatter == 4],
size=size_2, pen='#FFFF00', symbol='+', brush='#FFFF00')
# Scatter exit-stop y_pred
self.scatter_exit_stop_pred = pg.ScatterPlotItem(self.df_1.index[self.df_1.signals_pred_scatter == -4],
sig_ref[self.df_1.signals_pred_scatter == -4],
size=size_2, pen='#FF00FF', symbol='x', brush='#1E1E1E')
def show_risk_metrics(self, N, rf):
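# N is the number of return periods per year and rf is the annual risk-free rate in percent;
# annualized mean return and volatility are taken as mean*N and std*sqrt(N) below.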
# Sharpe Ratio
mean = self.strategy_returns_pred.mean() * N - (rf / 100)
sigma = self.strategy_returns_pred.std() * np.sqrt(N)
self.sharpe_ratio = mean / sigma
# Sortino Ratio
mean = self.strategy_returns_pred.mean() * N - (rf / 100)
std_neg = self.strategy_returns_pred[self.strategy_returns_pred < 0].std() * np.sqrt(N)
self.sortino_ratio = mean / std_neg
# Calmar Ratio
mean = self.strategy_returns_pred.mean() * N
self.calmar_ratio = mean / abs(self.drawdown[0].min())
text = "SHARPE: {} ".format(round(self.sharpe_ratio, 2)) + \
"SORTINO: {} ".format(round(self.sortino_ratio, 2)) + \
"CALMAR: {} ".format(round(self.calmar_ratio, 2))
self.risk_metrics_textitem.setText(text=text)
self.risk_metrics_textitem.show()
@staticmethod
def apply_tax(positions, strategy_returns_log, maker_tax):
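# Deducts the maker fee from the log-return series whenever the position changes:
# a long<->short flip pays the fee twice, while entering from or exiting to a flat
# (or NaN) position pays it once.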
for i in range(len(strategy_returns_log)):
if i > 1:
if positions[i - 2] == 1 and positions[i - 1] == -1:
strategy_returns_log[i] = strategy_returns_log[i] - ((2 * float(maker_tax)) / 100)
elif positions[i - 2] == -1 and positions[i - 1] == 1:
strategy_returns_log[i] = strategy_returns_log[i] - ((2 * float(maker_tax)) / 100)
elif positions[i - 2] == 0 and positions[i - 1] != 0:
strategy_returns_log[i] = strategy_returns_log[i] - (float(maker_tax) / 100)
elif positions[i - 2] != 0 and positions[i - 1] == 0:
strategy_returns_log[i] = strategy_returns_log[i] - (float(maker_tax) / 100)
elif np.isnan(positions[i - 2]) and positions[i - 1] != 0:
strategy_returns_log[i] = strategy_returns_log[i] - (float(maker_tax) / 100)
return strategy_returns_log
@staticmethod
def iter_df(df_1, pos_true, pos_pred, pct_rate, stop_rate, gain_rate, logic, amount):
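# Builds per-bar signal and position columns for both the true and predicted series:
# positions come either from a percent-change threshold (pct_rate) or directly from
# pos_true/pos_pred, and the loop below sets the scatter markers and applies the
# stop-loss / take-profit exit logic for the chosen trading logic.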
df_1.reset_index(drop=True, inplace=True)
pos_var_1 = 0
pos_var_2 = 0
pos_var_3 = 0
pos_var_4 = 0
c_ref_true = 0
c_ref_pred = 0
df_1["positions_pred_ref"] = np.nan
df_1["positions_true_ref"] = np.nan
df_1['signals_true'] = np.nan
df_1['signals_pred'] = np.nan
df_1['signals_true_scatter'] = 0
df_1['signals_pred_scatter'] = 0
df_1['positions_pred'] = np.nan
df_1['positions_true'] = np.nan
df_1['amount_str'] = 0.0
df_1.loc[0, 'amount_str'] = amount
df_1['amount_hold'] = 0.0
df_1.loc[0, 'amount_hold'] = amount
if pct_rate is not None:
pct_rate = pct_rate / 100
if logic == "long":
df_1.positions_true_ref = np.where(
df_1['true'] >= pct_rate, amount, np.where(df_1['true'] <= -pct_rate, 0.0, np.nan))
df_1.positions_pred_ref = np.where(
df_1['pred'] >= pct_rate, amount, np.where(df_1['pred'] <= -pct_rate, 0.0, np.nan))
elif logic == "short":
df_1.positions_true_ref = np.where(
df_1['true'] >= pct_rate, 0.0, np.where(df_1['true'] <= -pct_rate, -amount, np.nan))
df_1.positions_pred_ref = np.where(
df_1['pred'] >= pct_rate, 0.0, np.where(df_1['pred'] <= -pct_rate, -amount, np.nan))
else:
df_1.positions_true_ref = np.where(
df_1['true'] >= pct_rate, amount, np.where(df_1['true'] <= -pct_rate, -amount, np.nan))
df_1.positions_pred_ref = np.where(
df_1['pred'] >= pct_rate, amount, np.where(df_1['pred'] <= -pct_rate, -amount, np.nan))
df_1.positions_pred_ref.fillna(method='ffill', inplace=True)
df_1.positions_pred_ref.fillna(0, inplace=True)
df_1.positions_true_ref.fillna(method='ffill', inplace=True)
df_1.positions_true_ref.fillna(0, inplace=True)
else:
df_1.positions_true_ref = pos_true
df_1.positions_pred_ref = pos_pred
# Signals / Signals_Size
df_1["signals_size_pred"] = df_1.positions_pred_ref.diff()
df_1.signals_size_pred.fillna(df_1.positions_pred_ref[0], inplace=True)
df_1["signals_size_true"] = df_1.positions_true_ref.diff()
df_1.signals_size_true.fillna(df_1.positions_true_ref[0], inplace=True)
df_1["signals_pred"] = np.where(df_1.signals_size_pred > 0, 1, np.where(df_1.signals_size_pred < 0, -1, 0))
df_1["signals_true"] = np.where(df_1.signals_size_true > 0, 1, np.where(df_1.signals_size_true < 0, -1, 0))
# Positions
df_1['positions_true'] = df_1.positions_true_ref / amount
df_1['positions_pred'] = df_1.positions_pred_ref / amount
for index in range(len(df_1)):
if logic == "long":
# True
# Scatter Stop
if df_1.positions_true[index] == 0 and pos_var_1 == 10:
df_1.signals_true_scatter.values[index] = 0
df_1.signals_true.values[index] = 0
pos_var_1 = 0
# Scatter Long
if df_1.positions_true[index] > 0 and df_1.signals_true[index] == 1:
pos_var_1 = 1
df_1.signals_true_scatter.values[index] = 1
c_ref_true = df_1.c.values[index]
# Scatter Exit-Long
elif df_1.positions_true[index] == 0 and df_1.signals_true[index] == -1:
pos_var_1 = 0
df_1.signals_true_scatter.values[index] = -2
# Stop Loss/Gain Logic
if pos_var_1 == 10:
df_1.positions_true.values[index] = 0
# Pred
# Scatter Stop
if df_1.positions_pred[index] == 0 and pos_var_2 == 10:
df_1.signals_pred_scatter.values[index] = 0
df_1.signals_pred.values[index] = 0
pos_var_2 = 0
# Scatter Long
if df_1.positions_pred[index] > 0 and df_1.signals_pred[index] == 1:
pos_var_2 = 1
df_1.signals_pred_scatter.values[index] = 1
c_ref_pred = df_1.c.values[index]
# Scatter Exit-Long
elif df_1.positions_pred[index] == 0 and df_1.signals_pred[index] == -1:
pos_var_2 = 0
df_1.signals_pred_scatter.values[index] = -2
# Stop Loss/Gain Logic
if pos_var_2 == 10:
df_1.positions_pred.values[index] = 0
if logic == "short":
# True
# Scatter Stop
if df_1.positions_true[index] == 0 and pos_var_1 == -10:
df_1.signals_true_scatter.values[index] = 0
df_1.signals_true.values[index] = 0
pos_var_1 = 0
# Scatter Short
if df_1.positions_true[index] < 0 and df_1.signals_true[index] == -1:
pos_var_1 = -1
df_1.signals_true_scatter.values[index] = -1
c_ref_true = df_1.c.values[index]
# Scatter Exit-Short
elif df_1.positions_true[index] == 0 and df_1.signals_true[index] == 1:
pos_var_1 = 0
df_1.signals_true_scatter.values[index] = 2
# Stop Loss/Gain Logic
if pos_var_1 == -10:
df_1.positions_true.values[index] = 0
# Pred
# Scatter Stop
if df_1.positions_pred[index] == 0 and pos_var_2 == -10:
df_1.signals_pred_scatter.values[index] = 0
df_1.signals_pred.values[index] = 0
pos_var_2 = 0
# Scatter Short
if df_1.positions_pred[index] < 0 and df_1.signals_pred[index] == -1:
pos_var_2 = -1
df_1.signals_pred_scatter.values[index] = -1
c_ref_pred = df_1.c.values[index]
# Scatter Exit-Short
elif df_1.positions_pred[index] == 0 and df_1.signals_pred[index] == 1:
pos_var_2 = 0
df_1.signals_pred_scatter.values[index] = 2
# Stop Loss/Gain Logic
if pos_var_2 == -10:
df_1.positions_pred.values[index] = 0
if logic == "long-short":
# True
# Scatter Long
if df_1.positions_true[index] > 0 and df_1.signals_true[index] == 1:
pos_var_1 = 1
df_1.signals_true_scatter.values[index] = 1
c_ref_true = df_1.c.values[index]
# Scatter Short
elif df_1.positions_true[index] < 0 and df_1.signals_true[index] == -1:
pos_var_1 = -1
df_1.signals_true_scatter.values[index] = -1
c_ref_true = df_1.c.values[index]
# Stop Loss/Gain Logic
if pos_var_1 == -10 or pos_var_1 == 10:
df_1.positions_true.values[index] = 0
# Pred
# Scatter Long
if df_1.positions_pred[index] > 0 and df_1.signals_pred[index] == 1:
pos_var_2 = 1
df_1.signals_pred_scatter.values[index] = 1
c_ref_pred = df_1.c.values[index]
# Scatter Short
elif df_1.positions_pred[index] < 0 and df_1.signals_pred[index] == -1:
pos_var_2 = -1
df_1.signals_pred_scatter.values[index] = -1
c_ref_pred = df_1.c.values[index]
# Stop Loss/Gain Logic
if pos_var_2 == -10 or pos_var_2 == 10:
df_1.positions_pred.values[index] = 0
if logic == "long-short-exit":
# True
# Stop Loss/Gain Logic
if pos_var_3 == 20 and df_1.signals_true.values[index] != | |
# source repository: kwilcox/flopy
from numpy import empty,zeros,ones,where
from flopy.mbase import Package
class ModflowMnw2(Package):
'Multi-node well 2 package class\n'
'''
NOTE: This implementation does not allow well loss parameters {Rw,Rskin,Kskin,B,C,P,CWC,PP} to vary along the length of a given well. It also
does not currently support data sections 2e, 2f, 2g, 2h, or 4b as defined in the data input instructions for the MNW2 package.
'''
def __init__( self, model, mnwmax=0, iwl2cb=-1, mnwprnt=0, aux=None,
wellid=None, nnodes=None, losstype=None, pumploc=0, qlimit=0, ppflag=0, pumpcap=0,
lay_row_col=None, ztop_zbotm_row_col=None, rw=0, rskin=0, kskin=0, b=0, c=0, p=0, cwc=0, pp=1,
## pumplay_pumprow_pumpcol=0, zpump=0,
## hlim=None, qcut=None, qfrcmn=None, qfrcmx=None,
## hlift=0, liftq0=0, liftqmax=0, hwtol=0,
## liftn=None, qn=None,
itmp=0,
wellid_qdes=None, capmult=0, cprime=0,
extension='mnw2', unitnumber=34 ):
Package.__init__(self, model, extension, 'MNW2', unitnumber) # Call ancestor's init to set self.parent, extension, name, and unit number
self.url = 'mnw2.htm'
self.nper = self.parent.nrow_ncol_nlay_nper[-1]
self.heading = '# Multi-node well 2 (MNW2) file for MODFLOW, generated by Flopy'
self.mnwmax = mnwmax #-maximum number of multi-node wells to be simulated
self.iwl2cb = iwl2cb #-flag and unit number
self.mnwprnt = mnwprnt #-verbosity flag
self.aux = aux #-list of optional auxiliary parameters
self.wellid = wellid #-array containing well id's (shape = (MNWMAX))
self.nnodes = nnodes #-array containing # of nodes to be simulated for each well (shape = (MNWMAX))
self.losstype = losstype #-array containing head loss type for each well (shape = (MNWMAX))
self.pumploc = pumploc #-array containing integer flag pertaining to the location of a pump intake (if any) (shape = (MNWMAX))
self.qlimit = qlimit #-array containing integer flag indicating if water levels will be used to constrain pumping (shape = (MNWMAX))
self.ppflag = ppflag #-array containing integer flag indicating if water levels will be corrected for partial penetration (shape = (MNWMAX))
self.pumpcap = pumpcap #-array containing integer flag indicating if discharge from a pumping well is adjusted for changes in lift (shape = (MNWMAX))
self.lay_row_col = lay_row_col #-list of arrays containing lay, row, and col for all well nodes [NNODES > 0](shape = (NNODES,3), length = MNWMAX)
self.ztop_zbotm_row_col = ztop_zbotm_row_col #-list of arrays containing top and botm elevation of all open intervals [NNODES < 0](shape = (abs(NNODES),2), length = MNWMAX)
self.rw = rw #-array containing Rw (shape = (MNWMAX))
self.rskin = rskin #-array containing Rskin (shape = (MNWMAX))
self.kskin = kskin #-array containing Kskin (shape = (MNWMAX))
self.b = b #-array containing B (shape = (MNWMAX))
self.c = c #-array containing C (shape = (MNWMAX))
self.p = p #-array containing P (shape = (MNWMAX))
self.cwc = cwc #-array containing CWC (shape = (MNWMAX))
self.pp = pp #-array containing PP (shape = (MNWMAX))
## self.pumplay_pumprow_pumpcol = pumplay_pumprow_pumpcol #-array containing lay,row,col of pump intake for each well (if any) (shape = (MNWMAX,3))
## self.zpump = zpump #-array containing elevation of pump intake for each well (if any) (shape = (MNWMAX))
## self.hlim = hlim #-list of arrays containing limiting water level which constrains flow (shape = MNWMAX, length <= NPER)
## self.qcut = qcut #-list of arrays containing integer flag indicating how pumping limits will be specified (shape = MNWMAX, length <= NPER)
## self.qfrcmn = qfrcmn #-list of arrays containing minimum pumping rate or fraction of original pumping rate (shape = MNWMAX, length <= NPER)
## self.qfrcmx = qfrcmx #-list of arrays containing minimum pumping rate which must be exceeded to reactivate well (shape = MNWMAX, length <= NPER)
## self.hlift = hlift #-array containing the reference elevation of the discharge point for each well (shape = MNWMAX)
## self.liftq0 = liftq0 #-array containing the value of lift that exceeds the capacity of the pump (shape = MNWMAX)
## self.liftqmax = liftqmax #-array containing the value of lift that corresponds to the maximum pumping rate (shape = MNWMAX)
## self.hwtol = hwtol #-array containing the minimum absolute value of change in computed water level between iterations (shape = MNWMAX)
## self.liftn = liftn #-list of arrays containing the value of lift that corresponds to a known value of discharge (Qn) (shape = MNWMAX, length = pumpcap)
## self.qn = qn #-list of arrays containing the value of discharge corresponding to LIFTn (shape = MNWMAX, length = pumpcap)
self.itmp = itmp #-array containing # of wells to be simulated for each stress period (shape = (NPER))
self.wellid_qdes = wellid_qdes #-list of arrays containing desired Q for each well in each stress period (shape = (NPER,MNWMAX,2))
## self.capmult = capmult #-array containing CapMult flag for each well in each stress period (shape = (NPER,MNWMAX))
## self.cprime = cprime #-array containing Cprime for each well in each stress period (shape = (NPER,MNWMAX))
#-create empty arrays of the correct size
'''
NOTE: some arrays are not pre-formatted here as their shapes vary from well to well and from period to period.
'''
self.wellid = empty( (self.mnwmax),dtype='S25' )
self.nnodes = zeros( (self.mnwmax),dtype='int32' )
self.losstype = empty( (self.mnwmax),dtype='S25' )
self.pumploc = zeros( (self.mnwmax),dtype='int32' )
self.qlimit = zeros( (self.mnwmax),dtype='int32' )
self.ppflag = zeros( (self.mnwmax),dtype='int32' )
self.pumpcap = zeros( (self.mnwmax),dtype='int32' )
self.rw = zeros( self.mnwmax,dtype='float32' )
self.rskin = zeros( self.mnwmax,dtype='float32' )
self.kskin = zeros( self.mnwmax,dtype='float32' )
self.b = zeros( self.mnwmax,dtype='float32' )
self.c = zeros( self.mnwmax,dtype='float32' )
self.p = zeros( self.mnwmax,dtype='float32' )
self.cwc = zeros( self.mnwmax,dtype='float32' )
self.pp = zeros( self.mnwmax,dtype='float32' )
## self.pumplay_pumprow_pumpcol = empty( (self.mnwmax,3),dtype='int32' )
## self.zpump = empty( (self.mnwmax),dtype='float32' )
## self.hlift = empty( (self.mnwmax),dtype='float32' )
## self.liftq0 = empty( (self.mnwmax),dtype='float32' )
## self.liftqmax = empty( (self.mnwmax),dtype='float32' )
## self.hwtol = empty( (self.mnwmax),dtype='float32' )
self.itmp = zeros( self.nper,dtype='int32' )
## self.capmult = empty( (self.nper,self.mnwmax),dtype='float32' )
## self.cprime = empty( (self.nper,self.mnwmax),dtype='float32' )
#-assign values to arrays
self.assignarray_old( self.wellid, wellid )
self.assignarray_old( self.nnodes, nnodes )
self.assignarray_old( self.losstype, losstype )
self.assignarray_old( self.pumploc, pumploc )
self.assignarray_old( self.qlimit, qlimit )
self.assignarray_old( self.ppflag, ppflag )
self.assignarray_old( self.pumpcap, pumpcap )
self.assignarray_old( self.rw, rw )
self.assignarray_old( self.rskin, rskin )
self.assignarray_old( self.kskin, kskin )
self.assignarray_old( self.b, b )
self.assignarray_old( self.c, c )
self.assignarray_old( self.p, p )
self.assignarray_old( self.cwc, cwc )
self.assignarray_old( self.pp, pp )
## self.assignarray_old( self.pumplay_pumprow_pumpcol, pumplay_pumprow_pumpcol )
## self.assignarray_old( self.zpump, zpump )
## self.assignarray_old( self.hlift, hlift )
## self.assignarray_old( self.liftq0, liftq0 )
## self.assignarray_old( self.liftqmax, liftqmax )
## self.assignarray_old( self.hwtol, hwtol )
self.assignarray_old( self.itmp, itmp )
## self.assignarray_old( self.capmult, capmult )
## self.assignarray_old( self.cprime, cprime )
#-input format checks:
lossTypes = ['NONE','THIEM','SKIN','GENERAL','SPECIFYcwc']
for i in range(mnwmax):
assert len(self.wellid[i].split(' ')) == 1, 'WELLID (%s) must not contain spaces' % self.wellid[i]
assert self.losstype[i] in lossTypes, 'LOSSTYPE (%s) must be one of the following: NONE, THIEM, SKIN, GENERAL, or SPECIFYcwc' % self.losstype[i]
assert self.itmp[0] >= 0, 'ITMP must be greater than or equal to zero for the first time step.'
assert self.itmp.max() <= self.mnwmax, 'ITMP cannot exceed maximum number of wells to be simulated.'
self.parent.add_package(self)
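# Minimal usage sketch (hypothetical values; assumes an existing flopy model object `ml`
# with its discretization already defined -- not part of this file):
# mnw2 = ModflowMnw2(ml, mnwmax=1, wellid=['well-1'], nnodes=[1], losstype=['THIEM'],
# rw=[0.25], lay_row_col=[np.array([[0, 10, 10]])], itmp=[1],
# wellid_qdes=[[['well-1', -500.0]]])
# mnw2.write_file()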
def write_file( self ):
#-open file for writing
f_mnw2 = open( self.file_name[0], 'w' )
#-write header
f_mnw2.write( '%s\n' % self.heading )
#-Section 1 - MNWMAX, IWL2CB, MNWPRNT {OPTION}
auxParamString = ''
if self.aux != None:
for param in self.aux:
auxParamString = auxParamString + 'AUX %s ' % param
f_mnw2.write( '%10i%10i%10i %s\n' % ( self.mnwmax,
self.iwl2cb,
self.mnwprnt,
auxParamString ) )
#-Section 2 - Repeat this section MNWMAX times (once for each well)
for i in range(self.mnwmax):
#-Section 2a - WELLID, NNODES
f_mnw2.write( '%s%10i\n' % ( self.wellid[i], self.nnodes[i] ) )
#-Section 2b - LOSSTYPE, PUMPLOC, Qlimit, PPFLAG, PUMPCAP
f_mnw2.write( '%s %10i%10i%10i%10i\n' % ( self.losstype[i],
self.pumploc[i],
self.qlimit[i],
self.ppflag[i],
self.pumpcap[i] ) )
#-Section 2c - {Rw, Rskin, Kskin, B, C, P, CWC}
if self.losstype[i] == 'THIEM':
f_mnw2.write( '%10f\n' % ( self.rw[i] ) )
elif self.losstype[i] == 'SKIN':
f_mnw2.write( '%10f %10f %10f\n' % ( self.rw[i],
self.rskin[i],
self.kskin[i] ) )
elif self.losstype[i] == 'GENERAL':
f_mnw2.write( '%10f %10f %10f %10f\n' % ( self.rw[i],
self.b[i],
self.c[i],
self.p[i] ) )
elif self.losstype[i] == 'SPECIFYcwc':
f_mnw2.write( '%10f\n' % ( self.cwc[i] ) )
#-Section 2d - Repeat sections 2d-1 or 2d-2 once for each open interval
#-Section 2d-1 - NNODES > 0; LAY, ROW, COL {Rw, Rskin, Kskin, B, C, P, CWC, PP}
absNnodes = abs(self.nnodes[i])
if self.nnodes[i] > 0:
for n in range(absNnodes):
f_mnw2.write( '%10i%10i%10i\n' % ( self.lay_row_col[i][n,0],
self.lay_row_col[i][n,1],
self.lay_row_col[i][n,2] ) )
| |
"""
Created on 03/27/18.
@author: <NAME>, <NAME>
"""
import json
import numpy as np
from collections import OrderedDict
import multiprocessing as mp
import sys
from os import listdir, path
import re
import matplotlib.pyplot as plt
import matplotlib as mpl
def json_to_ordered_dict(file):
"""
Reads a timeline.json file output by TensorFlow/libcupti and returns an OrderedDict object
:param file: .json file.
:return: OrderedDict
"""
with open(file, mode='r') as f:
def _as_ordered_dict(val):
return OrderedDict(val)
output = json.load(f, object_hook=_as_ordered_dict, object_pairs_hook=_as_ordered_dict)
dic = OrderedDict(output)
return dic
def get_all_ops(trace_dic):
"""
Params:
trace_dic: collections.OrderedDict of traceEvent
Return: list of dictionaries of all ops.
"""
try:
trace_events = trace_dic['traceEvents']
except KeyError:
print('Not a valid GPU trace dict object.')
sys.exit()
all_ops = []
for trace in trace_events:
try:
if trace['cat'] == 'Op':
all_ops.append(trace)
except KeyError:
pass
return all_ops
def get_stream_all(trace_dic):
"""
Params:
trace_dic: collections.OrderedDict of traceEvent
Return: dictionary mapping process name to pid, and the pid of '/device:GPU:0/stream:all Compute'
"""
try:
trace_events = trace_dic['traceEvents']
except KeyError:
print('Not a valid GPU trace dict object.')
sys.exit()
all_procs = []
for trace in trace_events:
try:
if trace['name'] == 'process_name':
all_procs.append((trace['args']['name'], trace['pid']))
except KeyError:
pass
dic_procs = dict(all_procs)
pid = dic_procs['/device:GPU:0/stream:all Compute']
return dic_procs, pid
def get_unique_ops_names(all_ops):
"""
Find unique op names.
Params:
all_ops: list, of dictionary of all operations.
Return: list of unique op names.
"""
return set(op['name'] for op in all_ops)
def get_wall_duration(op_names, all_ops, pid_list=(11, 7, 13, 15, 9)):
"""
Calculates wall duration for each op in op_names.
Params:
op_names: list (str), names of ops of interest.
pid_list: list (int), process ids (pids) to include.
all_ops: output of get_all_ops().
Return:
dict['op'] = wall duration (sorted in descending order), total wall duration.
"""
# 1. Construct dictionary of op with name matching op_names
ops_dic = OrderedDict()
for name in op_names:
ops = []
for op in all_ops:
if op['name'] == name:
ops.append(op)
ops_dic[name] = ops
# 2. get duration for each op
op_dict = OrderedDict()
total_dur = 0
for op_name in op_names:
op_dur = 0
for itm in ops_dic[op_name]:
if itm['pid'] in pid_list:
op_dur += itm['dur']
op_dict[op_name] = op_dur * 1e-3 # convert from us to ms
total_dur += op_dur * 1e-3
# fixing the NCCL key:
op_dict['unknown (nccl AllReduceKernel_sum_)'] = op_dict.pop('unknown')
# Sorting durations:
sorted_dur = sorted(op_dict.items(), key=lambda x: x[1])[::-1]
# sorted_dur = sorted(op_dict.items(), key=operator.itemgetter(1))
return OrderedDict(sorted_dur), total_dur
def print_timeline_stats(sorted_dur, total_dur, min_msec=5):
"""
Prints the total time and times per op so long as the time was > min_msec
:param sorted_dur: OrderedDict object with time per op. Times in msec
:param total_dur: Number - total wall time per step. Time in msec
:param min_msec: Number, optional - minimum wall time for op
"""
print('Total Wall Duration (ms): %4.3f\n' % total_dur)
print('OPS with wall duration > %s ms:' % min_msec)
for key, val in sorted_dur.items():
if val > min_msec:
print('%s : %3.3f ms' % (key, val))
def parse_single_timeline(curr_file):
"""
Parses a single timeline file and extracts the time per op and total wall time
:param curr_file: str / unicode - path to a single timeline .json file
:return dicts: OrderedDict object with time per op. Times in msec
:return tot_times: Number - total wall time per step. Time in msec
"""
dic = json_to_ordered_dict(curr_file)
all_ops = get_all_ops(dic)
unique_op_names = get_unique_ops_names(all_ops)
proc_dic, stream_all_pid = get_stream_all(dic)
sorted_dur_dicts, total_dur = get_wall_duration(unique_op_names, all_ops, pid_list=[stream_all_pid])
return sorted_dur_dicts, total_dur
def parse_all_timeline_files(folder, prefix='timeline', suffix='.json'):
"""
Parses all timeline files in the given directory to extract the times per op and total wall time
:param folder: str / unicode - path to directory containing all the timeline json files
:param prefix: str / unicode (optional) - Prefix for the file names. Default = 'timeline'
:param suffix: str / unicode (optional) - suffix for the file names. Default = '.json'
:return dicts: list of OrderedDict objects per timeline file. Times in msec
:return tot_times: list of Numbers with the total wall time per step. Times in msec
"""
files = []
for name in listdir(folder):
if name.startswith(prefix) and name.endswith(suffix):
files.append(path.join(path.abspath(folder), name))
dicts = []
tot_times = []
if len(files) > 16:
cores = 4
pool = mp.Pool(cores)
jobs = pool.imap(parse_single_timeline, files)
results = [j for j in jobs]
pool.close()
for item in results:
dicts.append(item[0])
tot_times.append(item[1])
else:
for curr_file in files:
sorted_dur_dicts, total_dur = parse_single_timeline(curr_file)
dicts.append(sorted_dur_dicts)
tot_times.append(total_dur)
return dicts, tot_times
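# Minimal usage sketch (the folder name is hypothetical):
# dicts, tot_times = parse_all_timeline_files('./timelines', prefix='timeline', suffix='.json')
# print_timeline_stats(dicts[0], tot_times[0])
# fig = visualize_op_times(dicts, tot_times, do_hists=False)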
def visualize_op_times(dicts, tot_times, do_hists=True, nrows=3, ncols=3):
"""
Plots the total time, time taken by the N-1 most time consuming ops
:param dicts: list of OrderedDict objects per timeline file. Times in msec
:param tot_times: list of Numbers with the total wall time per step. Times in msec
:param do_hists: bool, optional - if True - plots histograms of the times. Else, bar graph
:param nrows: int, optional - Number of rows in the plot
:param ncols: int, optional - Number of columns in the plot
:return: fig: matplotlib.pyplot.Figure object
"""
mpl.rc('figure', figsize=(5, 5))
mpl.rc('lines', linewidth=2)
mpl.rc('axes', labelsize=16, titlesize=16)
mpl.rc('figure', titlesize=20)
mpl.rc('font', size=14) # global font size
mpl.rc('legend', fontsize=16, fancybox=True)
mpl.rc('xtick.major', size=6)
mpl.rc('xtick.minor', size=4)
if do_hists:
y_label = 'Counts'
x_label = 'Time (msec)'
else:
y_label = 'Time (msec)'
x_label = 'Horovod Rank'
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(3.75 * nrows, 3 * ncols))
if do_hists:
axes.flat[0].hist(tot_times)
else:
axes.flat[0].bar(range(len(tot_times)), tot_times)
axes.flat[0].set_title('Step time (msec)')
axes.flat[0].set_ylabel(y_label)
for ind, axis, name in zip(range(1, nrows * ncols), axes.flat[1:], list(dicts[0].keys())[:nrows * ncols - 1]):
vals = [x[name] for x in dicts]
if do_hists:
axis.hist(vals)
else:
axis.bar(range(len(vals)), vals)
axis.set_title(name)
if ind % ncols == 0:
axis.set_ylabel(y_label)
if ind >= (nrows - 1) * ncols:
axis.set_xlabel(x_label)
fig.suptitle('Wall Times', y=1.03)
fig.tight_layout()
return fig
def calc_flops(timeline, analytical_ops, op_names):
"""
Calculate FLOPS using duration per OP (from CUPTI) and analytical # of ops
:param timeline:
:param analytical_ops:
:param op_names:
:return: Total FLOPS (summed over all ops in op_names), dict['op_name'] = FLOPS
"""
# TODO:
# 1. Need dictionary with keys matching names in op_names that provides the total # of ops.
# 2. parse timeline with load_hardware_trace_json(file)
# 3. get all ops info with get_all_ops(trace_dic)
# 4. get wall duration for each op in op_names with get_wall_duration()
# 5. divide analytical ops by wall duration for each op in op_names.
# 6. Return total FLOPS and dictionary with key=op_name and val= FLOPS.
pass
def parse_nvprof_csv(nvprof_csv):
"""
Extract data from nvprof and calculate/return OPS, FLOPS.
"""
p = re.compile(r'Device')
with open(nvprof_csv) as f:
skip_ln = 0
while (True):
line = f.readline()
match = p.search(line)
if match:
fields = line
skip_ln += 1
break
if skip_ln > 20:
print('The provided file is missing nvprof headers!')
break
skip_ln += 1
fields = fields.split(',')
# Now that the number of header rows are known, the rest can be extracted easily
arr = np.genfromtxt(nvprof_csv, skip_header=skip_ln, delimiter='Floating Point Operations(Single Precision)',
comments='==', dtype=None, encoding=None)
logs = dict()
# it would have been easier if we could use pandas dataframes but that's not available
for lhs, rhs in arr:
lhs_splits = lhs.split(',')
rhs_splits = rhs.split(',')
logs[','.join(lhs_splits[1:-3])] = {'invocations': int(lhs_splits[-3]),
'min_ops': int(float(rhs_splits[1])),
'max_ops': int(float(rhs_splits[2])),
'avg_ops': int(float(rhs_splits[3]))}
return logs
def sum_nvprof_ops(nvprof_dict):
sum_min = 0
sum_max = 0
sum_avg = 0
for key, val in nvprof_dict.items():
sum_min += val['min_ops']
sum_max += val['max_ops']
sum_avg += val['avg_ops']
return sum_min, sum_max, sum_avg
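# Minimal usage sketch (the csv path is hypothetical; the file is expected to be an nvprof
# metrics export containing the 'Floating Point Operations(Single Precision)' metric):
# logs = parse_nvprof_csv('resnet_flop_sp.csv')
# min_ops, max_ops, avg_ops = sum_nvprof_ops(logs)
# grouped = cluster_nvprof_ops(logs, verbose=False)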
def cluster_nvprof_ops(nvprof_dict, verbose=False):
translation = {'convolve_sgemm': 'conv', 'volta_gcgemm': 'volta_gcgemm', 'relu': 'relu',
'fft2d_r2c': 'fft2d_r2c', 'fft2d_c2r': 'fft2d_c2r',
'EigenMetaKernel<Eigen::TensorEvaluator': 'EigenMetaKernel<Eigen::TensorEvaluator',
'stridedB': 'volta_scudnn_stridedB_splitK', 'gcgemm': 'volta_gcgemm_nt',
'cgemm': 'volta_cgemm_tn', 'cudnn::detail::wgrad_alg0_engine': 'cudnn::detail::wgrad_alg0_engine',
'volta_sgemm': 'volta_sgemm', 'void cudnn::detail::dgrad_engine': 'void cudnn::detail::dgrad_engine',
'void DSE::vector_fft': 'void DSE::vector_fft',
'void pooling_bw_kernel_max_nchw': 'void pooling_bw_kernel_max_nchw',
'pooling_bw_kernel': 'pooling_bw_kernel', 'pooling_fw': 'pooling_fw',
'winograd_nonfused': 'winograd_nonfused', 'regular_fft': 'regular_fft', 'bn_bw': 'batch_norm_bw',
'bn_fw': 'batch_norm_forward', }
new_dict = dict()
count = 0
for key, val in nvprof_dict.items():
grouped = False
new_val = val.copy()
for spec_name, gen_name in translation.items():
if spec_name in key and not grouped:
if verbose:
print(key[:30] + ' >> contains >> ' + spec_name)
old_val = new_dict.get(gen_name, None)
if old_val is not None:
if verbose:
print('existing entry:', old_val)
print('current entry:', new_val)
for prop_name in ['min_ops', 'max_ops', 'avg_ops', 'invocations']:
new_val[prop_name] += old_val[prop_name]
else:
if verbose:
print('no prior entry found. Using current entry:', new_val)
new_dict[gen_name] = new_val
grouped = True
count += 1
if not grouped:
if verbose:
print('Could not group key:', key)
new_dict[key] = new_val
if verbose:
print('')
print('Collapsed | |
"""
Email-based login system
Evennia contrib - Griatch 2012
This is a variant of the login system that requires an email-address
instead of a username to login.
This used to be the default Evennia login before it was replaced with a
more standard username + password system (having to supply an email
caused a lot of confusion when people wanted to expand on it; the email
is not strictly needed internally, nor is any confirmation email sent
out anyway).
Installation is simple:
To your settings file, add/edit the line:
```python
CMDSET_UNLOGGEDIN = "contrib.email_login.UnloggedinCmdSet"
```
That's it. Reload the server and try to log in to see it.
The initial login "graphic" will still not mention email addresses
after this change. The login splashscreen is taken from strings in
the module given by settings.CONNECTION_SCREEN_MODULE.
"""
import re
from django.conf import settings
from evennia.players.models import PlayerDB
from evennia.objects.models import ObjectDB
from evennia.server.models import ServerConfig
from evennia.comms.models import ChannelDB
from evennia.commands.cmdset import CmdSet
from evennia.utils import create, logger, utils, ansi
from evennia.commands.default.muxcommand import MuxCommand
from evennia.commands.cmdhandler import CMD_LOGINSTART
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate",
"CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp")
MULTISESSION_MODE = settings.MULTISESSION_MODE
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
CONNECTION_SCREEN = ""
try:
CONNECTION_SCREEN = ansi.parse_ansi(utils.random_string_from_module(CONNECTION_SCREEN_MODULE))
except Exception:
pass
if not CONNECTION_SCREEN:
CONNECTION_SCREEN = "\nEvennia: Error in CONNECTION_SCREEN MODULE (randomly picked connection screen variable is not a string). \nEnter 'help' for aid."
class CmdUnconnectedConnect(MuxCommand):
"""
Connect to the game.
Usage (at login screen):
connect <email> <password>
Use the create command to first create an account before logging in.
"""
key = "connect"
aliases = ["conn", "con", "co"]
locks = "cmd:all()" # not really needed
def func(self):
"""
Uses the Django admin api. Note that unlogged-in commands
have a unique position in that their `func()` receives
a session object instead of a `source_object` like all
other types of logged-in commands (this is because
there is no object yet before the player has logged in)
"""
session = self.caller
arglist = self.arglist
if not arglist or len(arglist) < 2:
session.msg("\n\r Usage (without <>): connect <email> <password>")
return
email = arglist[0]
password = arglist[1]
# Match an email address to an account.
player = PlayerDB.objects.get_player_from_email(email)
# No playername match
if not player:
string = "The email '%s' does not match any accounts." % email
string += "\n\r\n\rIf you are new you should first create a new account "
string += "using the 'create' command."
session.msg(string)
return
# We have at least one result, so we can check the password.
if not player.check_password(password):
session.msg("Incorrect password.")
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
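# Each ban entry is a tuple whose first element is a banned player name and whose
# third element (if set) is a compiled regex matched against the connecting IP
# (the exact tuple layout is defined by Evennia's ban handling, not in this module).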
if bans and (any(tup[0] == player.name for tup in bans)
or
any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
# actually do the login. This will call all hooks.
session.sessionhandler.login(session, player)
class CmdUnconnectedCreate(MuxCommand):
"""
Create a new account.
Usage (at login screen):
create \"playername\" <email> <password>
This creates a new player account.
"""
key = "create"
aliases = ["cre", "cr"]
locks = "cmd:all()"
def parse(self):
"""
The parser must handle the multiple-word player
name enclosed in quotes:
connect "Long name with many words" <EMAIL> mypassw
"""
super(CmdUnconnectedCreate, self).parse()
self.playerinfo = []
if len(self.arglist) < 3:
return
if len(self.arglist) > 3:
# this means we have a multi_word playername. pop from the back.
password = self.arglist.pop()
email = self.arglist.pop()
# what remains is the playername.
playername = " ".join(self.arglist)
else:
playername, email, password = self.arglist
playername = playername.replace('"', '') # remove "
playername = playername.replace("'", "")
self.playerinfo = (playername, email, password)
def func(self):
"Do checks and create account"
session = self.caller
try:
playername, email, password = self.playerinfo
except ValueError:
string = "\n\r Usage (without <>): create \"<playername>\" <email> <password>"
session.msg(string)
return
if not re.findall('^[\w. @+-]+$', playername) or not (0 < len(playername) <= 30):
session.msg("\n\r Playername can be max 30 characters, or less. Letters, spaces, dig\
its and @/./+/-/_ only.") # this echoes the restrictions made by django's auth module.
return
if not email or not password:
session.msg("\n\r You have to supply an e-mail address followed by a password." )
return
if not utils.validate_email_address(email):
# check so the email at least looks ok.
session.msg("'%s' is not a valid e-mail address." % email)
return
# Run sanity and security checks
if PlayerDB.objects.filter(username=playername):
# player already exists
session.msg("Sorry, there is already a player with the name '%s'." % playername)
return
if PlayerDB.objects.get_player_from_email(email):
# email already set on a player
session.msg("Sorry, there is already a player with that email address.")
return
if len(password) < 3:
# too short password
string = "Your password must be at least 3 characters or longer."
string += "\n\rFor best security, make it at least 8 characters long, "
string += "avoid making it a real word and mix numbers into it."
session.msg(string)
return
# everything's ok. Create the new player account.
try:
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
typeclass = settings.BASE_CHARACTER_TYPECLASS
permissions = settings.PERMISSION_PLAYER_DEFAULT
try:
new_player = create.create_player(playername, email, password,
permissions=permissions)
except Exception as e:
session.msg("There was an error creating the default Player/Character:\n%s\n If this problem persists, contact an admin." % e)
logger.log_trace()
return
# This needs to be set so the engine knows this player is
# logging in for the first time. (so it knows to call the right
# hooks during login later)
new_player.db.FIRST_LOGIN = True
# join the new player to the public channel
pchanneldef = settings.CHANNEL_PUBLIC
if pchanneldef:
pchannel = ChannelDB.objects.get_channel(pchanneldef[0])
if not pchannel.connect(new_player):
string = "New player '%s' could not connect to public channel!" % new_player.key
logger.log_err(string)
if MULTISESSION_MODE < 2:
# if we only allow one character, create one with the same name as Player
# (in mode 2, the character must be created manually once logging in)
new_character = create.create_object(typeclass, key=playername,
location=default_home, home=default_home,
permissions=permissions)
# set playable character list
new_player.db._playable_characters.append(new_character)
# allow only the character itself and the player to puppet this character (and Immortals).
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, new_player.id))
# If no description is set, set a default description
if not new_character.db.desc:
new_character.db.desc = "This is a Player."
# We need to set this to have @ic auto-connect to this character
new_player.db._last_puppet = new_character
# tell the caller everything went well.
string = "A new account '%s' was created. Welcome!"
if " " in playername:
string += "\n\nYou can now log in with the command 'connect %s <your password>'."
else:
string += "\n\nYou can now log with the command 'connect %s <your password>'."
session.msg(string % (playername, email))
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't,
# we won't see any errors at all.
session.msg("An error occurred. Please e-mail an admin if the problem persists.")
logger.log_trace()
class CmdUnconnectedQuit(MuxCommand):
"""
We maintain a different version of the `quit` command
here for unconnected players for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "quit"
aliases = ["q", "qu"]
locks = "cmd:all()"
def func(self):
"Simply close the connection."
session = self.caller
session.msg("Good bye! Disconnecting ...")
session.session_disconnect()
class CmdUnconnectedLook(MuxCommand):
"""
This is an unconnected version of the `look` command for simplicity.
This is called by the server and kicks everything in gear.
All it does is display the connect screen.
"""
key = CMD_LOGINSTART
aliases = ["look", "l"]
locks = "cmd:all()"
def func(self):
"Show the connect screen."
self.caller.msg(CONNECTION_SCREEN)
class CmdUnconnectedHelp(MuxCommand):
"""
This is an unconnected version of the help command,
for simplicity. It shows a pane of info.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
def func(self):
"Shows help"
string = \
"""
You are not yet logged into the game. Commands available at this point:
{wcreate, connect, look, help, quit{n
To login to the system, you need to do one of the following:
{w1){n If you have no previous account, you need to use | |
"""dspritesb.py
Stuff to know:
- a basic class to load the dSprites and paint a randomly centered 2D gaussian blob behind each sprite.
- uses pytorch DataLoader.
- dspritesb.demo() runs basic unit tests
"""
from torch.utils.data import Dataset, DataLoader
import torch
import os
import numpy as np
from matplotlib import pyplot as plt
from torchvision import transforms, utils
def generateLatentLinearMotion(N, n_timesteps, min_travelled_distance, min_coordinate=0., max_coordinate=1.):
""" generateLatentLinearMotion(N, n_timesteps, min_travelled_distance, min_coordinate=0., max_coordinate=1.):
general purpose function that samples an equidistantly spaced linear trajectory
in the space of [min_coordinate, max_coordinate] x [min_coordinate, max_coordinate] (e.g., [0,1] x [0,1])
Args:
N: number of trajectories
n_timesteps: number of samples on the line
min_travelled_distance: how far should the object travel at least
min_coordinate: lower bound on both x and y coordinates
max_coordinate: upper bound on both x and y coordinates
"""
d_coordinate = max_coordinate - min_coordinate
def dist(from_to_points):
dists = np.sqrt(np.sum((from_to_points[:,0,:] - from_to_points[:,1,:])**2, axis=1))
return dists
def tooCloseToEachOther(dists):
idx = np.where(dists <= min_travelled_distance)[0]
n = len(idx)
return n, idx
# step 1: sample start and end point with minimum distance between the two
start_and_end_points = min_coordinate + d_coordinate * np.random.rand(N, 2, 2)
n_to_close, idx = tooCloseToEachOther(dist(start_and_end_points))
while n_to_close>0:
start_and_end_points[idx,:,:] = min_coordinate + d_coordinate * np.random.rand(n_to_close, 2, 2)
n_to_close, idx = tooCloseToEachOther(dist(start_and_end_points))
# step 2: interpolate intermediate samples (chop line into n_timesteps points)
motion_latents = np.moveaxis(np.linspace(start_and_end_points[:,0,:],start_and_end_points[:,1,:], num=n_timesteps),0,-1).astype('float32')
return motion_latents
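# Minimal usage sketch: sample 100 linear trajectories of 10 frames each in the unit square.
# The result has shape (100, 2, 10): axis 1 holds the (x, y) coordinate, axis 2 the timestep.
# motion = generateLatentLinearMotion(N=100, n_timesteps=10, min_travelled_distance=0.1)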
class dSpriteBackgroundDatasetTime(Dataset):
""" dSprite with (gaussian) background dataset and time dimension (moving foreground object)
__getitem__ returns a 4D Tensor [m_timepoints, n_channels, image_size_x, image_size_y]
"""
def __init__(self, idx=None, shapetype='dsprite', transform=None, data_dir='../data/dsprites-dataset/',pixels=64, n_timesteps = 10, min_travelled_distance = 0.1):
"""
Args:
shapetype (string): circle or dsprite
transform (callable, optional): Optional transform to be applied
on a sample.
data_dir (string): path to download(ed) dsprites dataset
pixels: x,y number of pixels of sample (before transform)
n_timesteps: number of timesteps for the movie
min_travelled_distance: how far should the object travel at least (image assumed to have size 1 x 1. Therefore, the maximum distance travelled can be sqrt(2))
"""
self.shapetype = shapetype
self.pixels = pixels
self.n_timesteps = n_timesteps
self.n_channels = 1
self.min_travelled_distance = min_travelled_distance
# Load dataset
if shapetype == 'dsprite':
raise Exception('moving dSprite is not implemented')
#data = loadDspriteFile(data_dir)
#
#self.imgs = data['imgs']*255
#self.latents_values = data['latents_values'].astype('float32')
#self.latents_classes = data['latents_classes']
#metadata = data['metadata'][()]
#self.latents_sizes = metadata['latents_sizes']
#self.latents_bases = np.concatenate((self.latents_sizes[::-1].cumprod()[::-1][1:],
# np.array([1,])))
elif shapetype == 'circle':
"""
latents_values are of shape (N x 4 x n_timesteps), where N = self.__len__()
latents_value[0,:,0] -> [x Background, y Background, x Circle, y Circle]
"""
self.latents_bases = [200000]
# grid points from which background gaussian x,y coordinates are drawn
self.background_1d_sampling_points = np.linspace(0,1,32,dtype='float32')
self.latents_values = self.generateLatentSequence()
if idx is not None:
self.latents_values = self.latents_values[idx,:]
self.latents_bases[0] = np.shape(self.latents_values)[0]
if transform is None:
self.transform = transforms.Compose([transforms.ToPILImage(),
transforms.ToTensor()])
else:
self.transform = transforms.Compose([transforms.ToPILImage(),
transform,
transforms.ToTensor()])
def __len__(self):
return self.latents_bases[0]
def __getitem__(self, idx, mu=None):
# Set up foreground object
if self.shapetype == 'circle':
center = (0.75*self.pixels)*self.latents_values[idx,-2:,:] + np.array([1/8,1/8])[:,np.newaxis]*self.pixels
# create foreground movie
# loop over time to create succesive circle images
foreground = 255*self.circle2D(center[:,0])[:,:,:,np.newaxis]
for t in range(1,self.n_timesteps):
###TODO: translate latent scale to radius
foreground = np.append(foreground, 255*self.circle2D(center[:,t])[:,:,:,np.newaxis], axis=3)
elif self.shapetype == 'dsprite':
foreground = self.pick_dSprite(idx)
# Set up background
mu = self.latents_values[idx,:2,:]
# create background movie
bg = self.gaussian2D(mu[:,0])
bg = (255*bg).reshape(bg.shape+(1,)+(1,))
if self.background_static: # if background is static, we can save some time
background = np.repeat(bg,self.n_timesteps,axis=3)
else: # otherwise loop over background latents
background = bg
for t in range(1, self.n_timesteps):
bg = self.gaussian2D(mu[:,t])
bg = (255*bg).reshape(bg.shape+(1,)+(1,))
background = np.append(background, bg, axis=3)
# Combine foreground and background
sample = np.clip(foreground+0.8*background,0,255).astype('uint8')
# Output
latent = self.latents_values[idx,:,:]
# transform individual images sequentially
transf_sample = self.transform(sample[:,:,:,0]).unsqueeze(0)
if self.transform:
for t in range(1, self.n_timesteps):
transf_sample = torch.cat((transf_sample, self.transform(sample[:,:,:,t]).unsqueeze(0)), dim=0)
# transf_sample: [n_timesteps, n_channels, n_resizedpixel, n_resizedpixel]
return transf_sample,latent
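# Minimal usage sketch (hypothetical parameters): wrap the dataset in a DataLoader and pull
# one batch; each sample is a movie tensor of shape [n_timesteps, n_channels, pixels, pixels].
# ds = dSpriteBackgroundDatasetTime(shapetype='circle', pixels=64, n_timesteps=10)
# loader = DataLoader(ds, batch_size=4, shuffle=True)
# clips, latents = next(iter(loader)) # clips: [4, 10, 1, 64, 64]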
def generateLatentSequence(self):
N = self.__len__()
np.random.seed(0)
# step 1: create forground latens as linear motion
foreground_latents = generateLatentLinearMotion(N = N,
n_timesteps = self.n_timesteps,
min_travelled_distance = self.min_travelled_distance,
min_coordinate = 0.0,
max_coordinate = 1.0)
# step 2: static background
background_latents = np.zeros_like(foreground_latents, dtype='float32')
# background x-coordinate (constant across frames)
background_latents[:,0,:] = np.random.choice(self.background_1d_sampling_points, N, replace=True)[:,np.newaxis]
# background y-coordinate (constant across frames)
background_latents[:,1,:] = np.random.choice(self.background_1d_sampling_points, N, replace=True)[:,np.newaxis]
self.background_static = True
latents = np.concatenate((background_latents,foreground_latents),axis=1)
return latents
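# The returned latents have shape (N, 4, n_timesteps): rows 0-1 hold the static background
# gaussian (x, y), rows 2-3 the moving circle (x, y), matching the layout documented above.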
def arbitraryCircle(self,objx=None,objy=None,backx=None,backy=None,radius=None):
# Pick an arbitrary sample by directly addressing it
# if (objx is not None) and (objy is not None):
# ###TODO: make sure pad scales with circle scale, so as to never hit the border
# center = (0.75*self.pixels)*np.array([objx,objy]) + np.array([0.125,0.125])*self.pixels
# ###TODO: translate latent scale to radius
# foreground = 255*self.circle2D(center,radius)
# foreground = foreground.reshape((1,)+foreground.shape)
# else:
# foreground = np.zeros((self.pixels,self.pixels,1))
# foreground = foreground.reshape((1,)+foreground.shape)
# if (backx is not None) and (backy is not None):
# background = self.gaussian2D(np.array([backx,backy]))
# background = (255*background).reshape((1,)+background.shape+(1,))
# else:
# background = np.zeros((self.pixels,self.pixels,1))
# background = (255*background).reshape((1,)+background.shape)
#
# # Combine foreground and background
# ims = np.clip(foreground+0.8*background,0,255).astype('uint8')
#
# return self.transformArray(ims)
pass
def findDsprite(self,shape=None,scale=None,orientation=None,posX=None,posY=None,back=None):
# # Outputs an image or set of images based on the search parameters given by the inputs
# # Second output is just a list of bools where the matching imgs are true
#
# # initialize query to include all dsprites
# query = np.full((self.latents_bases[0],), True)
# # narrow search
# if shape is not None:
# query = query & (self.latents_classes[:,1]==shape)
# if scale is not None:
# query = query & (self.latents_classes[:,2]==scale)
# if orientation is not None:
# query = query & (self.latents_classes[:,3]==orientation)
# if posX is not None:
# query = query & (self.latents_classes[:,4]==posX)
# if posY is not None:
# query = query & (self.latents_classes[:,5]==posY)
# # Convert list of bools to list of indices
# idx = np.where(query)[0]
# # Pick the images
# ims = self.pick_dSprite(idx)
#
# if back is not None:
# # Add background
# if type(back) is not np.ndarray:
# background = 2*self.gaussian2D(mu=2*np.random.randint(self.pixels/2,size=2))/self.pixels
# else:
# background = self.gaussian2D()
# background = 255*background.reshape((1,)+ims.shape[1:])
#
# # Combine foreground and background
# ims = np.clip(ims+0.8*np.tile(background,(ims.shape[0],1,1,1)),0,255).astype('uint8')
#
# return self.transformArray(ims),idx
pass
def transformArray(self,ims):
# Takes numpy array of image(s) and outputs the image(s), each transformed with self.transform
return torch.unsqueeze(torch.cat([self.transform(ims[i,:,:,:]) for i in np.arange(ims.shape[0])]),1)
# Generate 2D gaussian backgrounds
def gaussian2D(self,mu=np.array([0.5,0.5]),Sigma=np.array([[15, 0], [0, 15]]),pos=None):
if pos is None:
gridx, gridy = np.meshgrid(np.arange(0,self.pixels),np.arange(0,self.pixels))
pos = np.empty(gridx.shape + (2,))
pos[:,:,0] = gridx
pos[:,:,1] = gridy
mu = mu*self.pixels
Sigma = Sigma*self.pixels
# from https://scipython.com/blog/visualizing-the-bivariate-gaussian-distribution/
#n = mu.shape[0]
#Sigma_det = np.linalg.det(Sigma)
Sigma_inv = np.linalg.inv(Sigma)
#N = np.sqrt((2*np.pi)**n * Sigma_det)
# This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized
# way across all the input variables.
fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)
#return np.exp(-fac / 2) / N
fac = np.exp(-fac/2)
# Normalized to peak at 1
return fac/np.max(fac)
# Generate a circle in a random position
def circle2D(self,center,radius=None,pos=None):
if radius is None:
radius = 0.1
radius = radius*self.pixels
if pos is None:
gridx, gridy = np.meshgrid(np.arange(0,self.pixels),np.arange(0,self.pixels))
z = np.square(gridx-center[0]) + np.square(gridy-center[1]) - radius
# Threshold by radius
z = (z<=np.square(radius)).astype('uint8')
# Output 3D [h,w,channel] tensor
return z.reshape(z.shape+(1,))
# Generate dSprite with 2D gaussian background
def pick_dSprite(self,idx=None):
if idx is None:
idx = np.random.randint(self.latents_bases[0])
im = self.imgs[idx,:,:]
im = im.reshape(im.shape+(1,)) # add channel to end (assumes numpy ndarray)
return im
def getCircleSegmentationMasks(self, objx, objy, dr_in = .05, dr_margin = .05, radius = .1, thresh = .4):
"""
getCircleSegmentationMasks(self, objx, objy, dr_in = .05, dr_margin = .05, radius = .1, thresh = .4):
returns a segmentation of the image:
objectMask: mask corresponding to the circle object
objectEdgeMask: mask corresponding to the edge of the circle object
insideObjectMask: mask corresponding to the inner part of the circle (in the object but not part of the objectEdgeMask)
backgroundMask: mask of the rest (not in the objectMask or the objectEdgeMask)
inputs:
- dr_in: controls by how much smaller (absolute) the inner circle is than the actual circle object
- dr_margin: controls the radius of the area which is _not_ considered to | |
9, column = 0)
self.image_trim_dim = PhotoImage(file = os.path.join(appPath, 'res', 'chain_dim.gif'))
self.buttonTrim_dim=Button(self.frame2p, image = self.image_trim_dim, command = self.parent.trim_dim)#, bg = self.button_func_color)
self.buttonTrim_dim.grid(row = 10, column = 0)
#ToolTips
self.tooltip_line = ToolTip(self.buttonLine, text="Line")
self.tooltip_circle = ToolTip(self.buttonCircle, text="Circle")
self.tooltip_arc = ToolTip(self.buttonArc, text="Arc")
self.tooltip_dim = ToolTip(self.buttonDim, text="Dimension")
self.tooltip_dimR = ToolTip(self.buttonDimR, text="Radial dimension")
self.tooltip_text = ToolTip(self.buttonText, text="Text")
self.tooltip_copy = ToolTip(self.buttonCopy, text="Copy")
self.tooltip_move = ToolTip(self.buttonMove, text="Move")
self.tooltip_mir = ToolTip(self.buttonMir, text="Mirror")
self.tooltip_rot = ToolTip(self.buttonRot, text="Rotate")
self.tooltip_offset = ToolTip(self.buttonOffset, text="Offset")
self.tooltip_copy_p = ToolTip(self.buttonCopyP, text="Copy properties")
self.tooltip_fillet = ToolTip(self.buttonFillet, text="Fillet")
self.tooltip_trim = ToolTip(self.buttonTrim, text="Trim line")
self.tooltip_extend = ToolTip(self.buttonExtend, text="Extend line")
self.tooltip_scale = ToolTip(self.buttonScale, text="Scale")
self.tooltip_trim_dim = ToolTip(self.buttonTrim_dim, text="Chain dimension")
#Canvas
self.canvas=Canvas(self.frame1,bg=self.parent.fon_color)
self.canvas.config(cursor='crosshair')
self.canvas.pack(side=LEFT, fill=BOTH, expand=YES)
self.cc = None
self.frame2p.pack(side=LEFT,fill=Y)
#Event handler callbacks
def select_color(event):
col = self.combo_color.get()
self.parent.color = col
self.label_color.config(bg = col)
self.parent.param_edit({'fill':col})
self.command.focus_set()
def select_width(event):
wid = self.combo_width.get()
self.parent.width = wid
self.parent.param_edit({'width':wid})
self.command.focus_set()
def select_stipple(event):
s = self.combo_s.get()
stipple = self.stipples[s]
#if stipple:
#stipple = map(lambda x: x*self.parent.stipple_size, stipple)
self.parent.stipple = stipple
self.parent.param_edit({'stipple':stipple})
self.command.focus_set()
#Event bindings
self.combo_color.bind("<<ComboboxSelected>>", select_color)
self.combo_width.bind("<<ComboboxSelected>>", select_width)
self.combo_s.bind("<<ComboboxSelected>>", select_stipple)
self.entry_size_t.bind("<FocusOut>", size_t_ok)
self.entry_size_f.bind("<FocusOut>", size_f_ok)
def about(self, event = None):
self.imag = PhotoImage(file = os.path.join(appPath, 'media', 'PyTkCad_icon_main100.png'))
eroot = Toplevel()
eroot.title('About program')
eroot.resizable(width=FALSE, height=FALSE)
l_donate = Label(eroot, justify = LEFT, text = self.Habout)
but = Button(eroot, text = 'License', command = self.license_)
but2 = Button(eroot, text = 'Donate', command = self.parent.d)
but3 = Button(eroot, text = 'Close', command = eroot.destroy)
ca = Canvas(eroot, width = 100, height = 100)
ca.create_image(0,0,anchor=NW,image = self.imag)
ca.grid(row=0, column = 0, rowspan = 2, padx = 5, pady = 5)
l_donate.grid(row=0, column = 1,columnspan = 3, padx = 10, pady = 10)
but.grid(row=1, column = 1, padx = 10, pady = 10)
but2.grid(row=1, column = 2, padx = 10, pady = 10)
but3.grid(row=1, column = 3, padx = 10, pady = 10)
def license_(self, event = None):
eroot = Toplevel()
eroot.title('License')
eroot.resizable(width=FALSE, height=FALSE)
l_ = Label(eroot, justify = LEFT, text = self.lic)
but = Button(eroot, text = 'Close', command = eroot.destroy)
l_.grid(row=0, column = 1,columnspan = 3, padx = 10, pady = 10)
but.grid(row=1, column = 2, padx = 10, pady = 10)
def draw_opt(self):
if self.d_opt1_off_on == False:
self.d_opt1 = Draw_options()
def dim_opt(self):
if self.d_opt2_off_on == False:
self.d_opt2 = Dim_options()
def line_opt(self):
if self.d_opt3_off_on == False:
self.d_opt3 = Line_options()
def text_opt(self):
if self.d_opt4_off_on == False:
self.d_opt4 = Text_options()
def obj_prop(self, event = None):
if self.o_prop_off_on == False:
self.o_prop = Object_properties()
def update_prop(self):
if self.o_prop:
self.o_prop.viewer()
def normal(self, num):
if num < 0:
num = -num
if num > 0:
num = float(num)
return num
class Options:
def __init__(self):
self.window = Toplevel()
self.ico = PhotoImage(file = os.path.join(appPath, 'res', 'options.gif'))
self.window.tk.call('wm', 'iconphoto', self.window._w, self.ico)
self.window.transient(gui.master1)
self.frame_buttons = Frame(self.window)
self.window.resizable(width=FALSE, height=FALSE)
self.button_apply = Button(self.frame_buttons, text = 'Apply', command = self.apply_p)
self.button_close = Button(self.frame_buttons, text = 'Close', command = self.exitMethod)
self.button_apply.grid(row = 0, column = 0, sticky = 'w', padx = 3, pady = 3)
self.button_close.grid(row = 0, column = 1, sticky = 'w', padx = 3, pady = 3)
self.window.protocol('WM_DELETE_WINDOW', self.exitMethod)
self.window.bind("<Escape>", self.exitMethod)
self.window.bind("<Return>", self.apply_p)
def apply_p(self):
pass
def exitMethod(self, event = None):
pass
class Draw_options(Options):
def __init__(self):
Options.__init__(self)
gui.d_opt1_off_on = True
self.window.title('Draw options')
self.frame_label1 = Frame(self.window)
self.frame_options1 = Frame(self.window)
self.frame_label2 = Frame(self.window)
self.frame_options2 = Frame(self.window)
def check_snap():
if self.snap_Flag == False:
self.snap_Flag = True
else:
self.snap_Flag = False
def check_tracing():
if self.tracing_Flag == False:
self.tracing_Flag = True
else:
self.tracing_Flag = False
def select_color_snap(event):
col = self.combo_col_snap.get()
self.label_col_snap2.config(bg = col)
def select_color_select(event):
col = self.combo_col_select.get()
self.label_col_select2.config(bg = col)
def select_color_fon(event):
col = self.combo_col_fon.get()
self.label_col_fon2.config(bg = col)
self.snap_Flag = False
self.tracing_Flag = False
self.snap_Flagg = IntVar()
self.tracing_Flagg = IntVar()
self.label_n_snap = Label(self.frame_label1, text = 'Snap options')
self.n_snap = Checkbutton(self.frame_options1,text='Snap to near point',variable = self.snap_Flagg, command = check_snap)
self.tracing = Checkbutton(self.frame_options1,text='Tracing',variable = self.tracing_Flagg, command = check_tracing)
self.label_tracing_step = Label(self.frame_options1, text = 'Step of tracing angle')
self.combo_tracing_step = ttk.Combobox(self.frame_options1, values = ('5.0',
'10.0',
'15.0',
'30.0',
'45.0',
'60.0',
'90.0'), width = 10, state='readonly')
self.combo_tracing_step.set(gui.parent.angle_s)
self.label_col_snap1 = Label(self.frame_options2, text = 'Color snap icon')
self.label_col_snap2 = Label(self.frame_options2, text = ' ', bg = gui.parent.priv_color)
self.combo_col_snap = ttk.Combobox(self.frame_options2, values = gui.colores, width = 10, state='readonly')
self.combo_col_snap.set(gui.parent.priv_color)
self.combo_col_snap.bind("<<ComboboxSelected>>", select_color_snap)
self.label_size_snap_z = Label(self.frame_options1, text = 'Size snap icon')
self.entry_size_snap_z = FloatEntry(self.frame_options1)
self.entry_size_snap_z.insert(0, gui.parent.size_simbol_p)
self.label_dist_snap = Label(self.frame_options1, text = 'Snap distance')
self.entry_size_snap = FloatEntry(self.frame_options1)
self.entry_size_snap.insert(0, gui.parent.snap_s)
self.label_dr_opt = Label(self.frame_label2, text = 'Show options')
'''
self.label_dim_s = Label(self.frame_options2, text = 'Dimension proportions')
self.entry_dim_s = FloatEntry(self.frame_options2)
self.entry_dim_s.insert(0, gui.parent.s)
'''
self.label_col_select = Label(self.frame_options2, text = 'Color of selected objects')
self.label_col_select2 = Label(self.frame_options2, text = ' ', bg = gui.parent.select_color)
self.combo_col_select = ttk.Combobox(self.frame_options2, values = gui.colores, width = 10, state='readonly')
self.combo_col_select.set(gui.parent.select_color)
self.combo_col_select.bind("<<ComboboxSelected>>", select_color_select)
self.label_col_fon = Label(self.frame_options2, text = 'Background color')
self.label_col_fon2 = Label(self.frame_options2, text = ' ', bg = gui.parent.fon_color)
self.combo_col_fon = ttk.Combobox(self.frame_options2, values = gui.colores, width = 10, state='readonly')
self.combo_col_fon.set(gui.parent.fon_color)
self.combo_col_fon.bind("<<ComboboxSelected>>", select_color_fon)
#Layout (grid packer)
self.frame_label1.grid(row = 0, column = 0)
self.frame_options1.grid(row = 1, column = 0, sticky = 'w')
self.frame_label2.grid(row = 2, column = 0)
self.frame_options2.grid(row = 3, column = 0, sticky = 'w')
self.frame_buttons.grid(row = 4, column = 0, sticky = 'e')
self.label_n_snap.grid(row = 0, column = 0, columnspan = 2)
self.n_snap.grid(row = 0, column = 0, columnspan = 2, sticky = 'w', padx = 3, pady = 3)
self.tracing.grid(row = 1, column = 0, columnspan = 2, sticky = 'w', padx = 3, pady = 3)
self.label_size_snap_z.grid(row = 2, column = 0, sticky = 'w', padx = 3, pady = 3)
self.entry_size_snap_z.grid(row = 2, column = 1, sticky = 'w', padx = 3, pady = 3)
self.label_dist_snap.grid(row = 3, column = 0, sticky = 'w', padx = 3, pady = 3)
self.entry_size_snap.grid(row = 3, column = 1, sticky = 'w', padx = 3, pady = 3)
self.label_tracing_step.grid(row = 4, column = 0, sticky = 'w', padx = 3, pady = 3)
self.combo_tracing_step.grid(row = 4, column = 1, sticky = 'w', padx = 3, pady = 3)
self.label_dr_opt.grid(row = 0, column = 0, columnspan = 2)
#self.label_dim_s.grid(row = 0, column = 0, sticky = 'w', padx = 3, pady = 3, columnspan = 2)
#self.entry_dim_s.grid(row = 0, column = 1, sticky = 'w', padx = 3, pady = 3, columnspan = 2)
self.label_col_snap1.grid(row = 0, column = 0, sticky = 'w', padx = 3, pady = 3)
self.label_col_snap2.grid(row = 0, column = 1, sticky = 'w', padx = 3, pady = 3)
self.combo_col_snap.grid(row = 0, column = 2, sticky = 'w', padx = 3, pady = 3)
self.label_col_select.grid(row = 1, column = 0, sticky = 'w', padx = 3, pady = 3)
self.label_col_select2.grid(row = 1, column = 1, sticky = 'w', padx = 3, pady = 3)
self.combo_col_select.grid(row = 1, column = 2, sticky = 'w', padx = 3, pady = 3)
self.label_col_fon.grid(row = 2, column = 0, sticky = 'w', padx = 3, pady = 3)
self.label_col_fon2.grid(row = 2, column = 1, sticky = 'w', padx = 3, pady = 3)
self.combo_col_fon.grid(row = 2, column = 2, sticky = 'w', padx = 3, pady = 3)
if gui.parent.snap_near == True:
self.n_snap.select()
self.snap_Flag = True
if gui.parent.tracingFlag == True:
self.tracing.select()
self.tracing_Flag = True
def apply_p(self, event = None):
n_snap = self.snap_Flag
tracing = self.tracing_Flag
tracing_step = self.combo_tracing_step.get()
combo_col_snap = self.combo_col_snap.get()
entry_size_snap_z = self.entry_size_snap_z.get()
entry_size_snap = self.entry_size_snap.get()
combo_col_select = self.combo_col_select.get()
combo_col_fon = self.combo_col_fon.get()
gui.parent.angle_s = float(tracing_step)
gui.parent.snap_near = n_snap
gui.parent.tracingFlag = tracing
gui.parent.priv_color = combo_col_snap
gui.parent.select_color = combo_col_select
gui.parent.fon_color = combo_col_fon
gui.parent.c.config(bg = combo_col_fon)
if combo_col_fon == 'light blue':
gui.parent.left_color = 'black'
else:
gui.parent.left_color = 'light blue'
if combo_col_fon == 'red':
gui.parent.right_color = 'orange'
else:
gui.parent.right_color = 'red'
gui.parent.size_simbol_p = gui.normal(entry_size_snap_z)
gui.parent.snap_s = gui.normal(entry_size_snap)
gui.parent.snap_n(color_only = 'yes')
gui.parent.ort(color_only = 'yes')
gui.parent.trac(color_only = 'yes')
def exitMethod(self, event = None):
self.window.destroy()
gui.d_opt1_off_on = False
class Dim_options(Options):
def __init__(self):
Options.__init__(self)
gui.d_opt2_off_on = True
self.window.title('Dimension options')
self.frame_options1 = Frame(self.window)
self.frame_pic = Frame(self.window, bg = 'white', bd = 5, relief = RIDGE)
self.select = 0
def select_type_arrow(event):
self.select = 1
self.imag = PhotoImage(file = os.path.join(appPath, 'res', 'dim_prop.gif'))
| |
<reponame>SolarAlma/SALAT
import numpy as np
from astropy.io import fits
import astropy.units as u
from datetime import datetime,timedelta
import scipy
from scipy import ndimage
from scipy import stats as scpstats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from typing import NamedTuple
import radio_beam as rb
import tqdm
############################ SALAT READ ############################
def read(file,fillNan=False,timeout=False,beamout=False,HEADER=True,SILENT=False):
"""
Name: read
part of -- Solar Alma Library of Auxiliary Tools (SALAT) --
Purpose: This function loads all type of data on level 4 fits
Parameters
----------
file: string
path to ALMA cube
fillNan: Boolean, False default
If True user will be asked to enter value
or to choose if wants to use Median
timeout: Boolean, False default
If True it returns 1D array of time in seconds
beamout: Boolean, False default
If True it returns 3 arrays being beam axes ang angles
HEADER: Boolean, True default
If False it does not returns original header (make use of salat_read_header)
SILENT: Boolean, False default
If True it does not print out info in terminal
Returns
-------
sqcubecrop: np.array
Squeezed and cropped ALMA cube with dimensions [t,x,y]
hdr: astropy.io.fits.header.Header
main header
timesec: np.array
Optional, array with time in seconds (0 s is start observation)
timeutc: np.array of datetime.datetime
Optional, array with time in UTC
beammajor: np.array
Optional, array with beam major axes in arcsec
beamminor: np.array
Optional, array with beam minor axes in arcsec
beamangle: np.array
Optional, array with beam angles in degrees
Examples
--------
>>> import salat
>>> file = "./solaralma.b3.fba.20161222_141931-150707.2016.1.00423.S.level4.k.fits"
#To get only cube, note that _ are mandatory for non-asked variables
>>> almacube,_,_,_,_,_,_ = salat.read(file,SILENT=True)
#To get cube and times and print out information in Terminal
>>> almacube,_,timesec,timeutc,_,_,_ = salat.read(file,timeout=True)
#To get cube and beam info and print out information in Terminal
>>> almacube,_,_,_,beammajor,beamminor,beamangle = salat.read(file,beamout=True)
Modification history:
---------------------
© <NAME>. (RoCS/SolarALMA), July 2021
"""
print("")
print("---------------------------------------------------")
print("--------------- SALAT READ part of ----------------")
print("-- Solar Alma Library of Auxiliary Tools (SALAT) --")
print("")
print("Reading ALMA cube")
print("")
############### READ CUBE FITS################
cubedata = fits.open(file) #Cube data dimensions [t,S,f,x,y] main
sqcube = np.squeeze(cubedata[0].data) #Cube images squeezed to [t,y,x]
times = []
aii_all = []
afi_all = []
for item in tqdm.tqdm(sqcube):
af = item
afw = af.shape[0]
afri = int(afw/2)
aii = int(np.argwhere(~np.isnan(af[afri]))[0]) #Identify left non-Nan
afi = int(np.argwhere(~np.isnan(af[afri]))[-1])#identify right non-Nan
aii_all.append(aii)
afi_all.append(afi)
del aii,afi,afw,afri
afi = int(scpstats.mode(afi_all).mode) #Stats mode of indexes left and right for non-Nans
aii = int(scpstats.mode(aii_all).mode) #Stats mode of indexes left and right for non-Nans
    sqcubecrop = sqcube[:,aii:afi,aii:afi].copy() #Cube is cropped to remove the NaNs around the edges
#Filling Nans if option True
if fillNan:
useroption = input("Do you want to fill NaN with data median? (y/n): ")
if useroption == 'y' or useroption == 'Y' or useroption == 'yes' or useroption == 'Yes':
NaNValue = np.nanmedian(sqcubecrop) #to be used to fill Nans
sqcubecrop[np.isnan(sqcubecrop)] = NaNValue
else:
            NaNValue = float(input("Enter value to fill NaN with: ")) #float() instead of eval() for safety
sqcubecrop[np.isnan(sqcubecrop)] = NaNValue
############### Read header ################
hdr0 = read_header(file,ALL=True,ORIGINAL=True,SILENT=True)
############### Reading Times ################
if timeout:
# print("Reading Times")
# print("")
dateobs = hdr0["DATE-OBS"][:10]
timesec = cubedata[1].data[3]-np.nanmin(cubedata[1].data[3]) #Time array in Seconds
        timeutc = np.array([datetime.strptime(dateobs,"%Y-%m-%d")+
timedelta(seconds=int(item),microseconds=int(1e6*(item%1))) for item in cubedata[1].data[3]])
else:
timesec = None
timeutc = None
############### Reading Beam axes and angle ################
if beamout:
# print("Reading Beam properties")
# print("")
beammajor = np.array([item*u.deg.to(u.arcsec)
for item in cubedata[1].data[0]]) #unsure about index for BMAJ and BMIN
beamminor = np.array([item*u.deg.to(u.arcsec)
for item in cubedata[1].data[1]])
beamangle = np.array([item
for item in cubedata[1].data[2]])
else:
beammajor = None
beamminor = None
beamangle = None
############### Print out in terminal ################
if SILENT == False:
info(file)
############### Return variables ################
# If options are False, variables are None
print("Done!")
    if HEADER == False:
        hdr0 = None
    return sqcubecrop,hdr0,timesec,timeutc,beammajor,beamminor,beamangle
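# Hedged sketch (not part of SALAT): the NaN-edge cropping idea used in read()
# above, written as a standalone helper. For each frame we take the middle row,
# find its first and last non-NaN columns, and crop the cube to the modal
# (most common) window. Like read(), it assumes a roughly square field of view,
# so the same indices are applied to both spatial axes.
def _crop_nan_edges_sketch(cube):
    """Illustrative only: crop a [t, y, x] cube to its modal non-NaN window."""
    left, right = [], []
    for frame in cube:
        row = frame[frame.shape[0] // 2]              # middle row of the frame
        good = np.argwhere(~np.isnan(row)).ravel()    # indices of non-NaN pixels
        left.append(int(good[0]))                     # first non-NaN column
        right.append(int(good[-1]))                   # last non-NaN column
    l_edge = int(scpstats.mode(left).mode)            # most common left edge
    r_edge = int(scpstats.mode(right).mode)           # most common right edge
    return cube[:, l_edge:r_edge, l_edge:r_edge].copy()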
############################ SALAT READ HEADER ############################
def read_header(file,ALL=False,ORIGINAL=False,SILENT=False):
"""
Name: read_header
part of -- Solar Alma Library of Auxiliary Tools (SALAT) --
    Purpose: This function loads the header of an ALMA cube according
    to the description in the handbook.
Parameters
----------
file: string
path to ALMA cube
    ALL: Boolean, False Default
        If True, the original header is returned as an astropy.io.fits.header.Header
        If False, the header is returned as a class structure depending on the ORIGINAL parameter
    ORIGINAL: Boolean, False Default
        If True, the header structure preserves the original keyword names
        If False, the header structure gets new meaningful keywords as in the documentation
    SILENT: Boolean, False Default
        If True, it does not print out in the terminal
Returns
-------
    header: Class or astropy.io.fits.header.Header
        Header as a NamedTuple class whose fields can be accessed as header.<varname> if ALL=False
        Header as astropy.io.fits.header.Header if ALL=True
Examples
-------
>>> import salat
>>> path_alma = "./solaralma.b6.fba.20170328-150920_161212.2016.1.00788.S.level4.k.fits"
>>> header = salat.read_header(path_alma)
Modification history:
---------------------
© <NAME>. (RoCS/SolarALMA), July, August 2021
"""
############### Loading Original Header ################
hdr0 = fits.open(file)[0].header
############### Header structure depending on input options ################
if ALL == False: #If ALL FALSE then only important tags names are passed to structure
#the important tag names are manually defined
important_tags = ['BMAJ','BMIN','BPA','CRVAL1','CRVAL2','CRVAL3','CRVAL1A','CRVAL2A','RESTFRQ','DATE-OBS',
'INSTRUME','DATAMIN','DATAMAX','PROPCODE','PWV','CDELT1A']
important_tags_meaningful = ['major_beam_mean','minor_beam_mean','beam_angle_mean','RA','Dec','Frequency','solarx','solary','Rest_frequency','DATE_OBS',
'ALMA_Band','min_of_datacube','max_of_datacube','ALMA_project_id','water_vapour','pixel_size']
important_tags_values = [hdr0[item] for item in important_tags]
important_tags_values_type = [type(item) for item in important_tags_values]
if ORIGINAL == True:
important_tags = [w.replace('-', '_') for w in important_tags] #Class dont handle - for varname
class Header(NamedTuple):
"""
Define tags and types
"""
for i in range(len(important_tags_values)):
exec("%s : %s" % (important_tags[i],important_tags_values_type[i].__name__))
else:
class Header(NamedTuple):
"""
Define tags and types
"""
for i in range(len(important_tags_values)):
exec("%s : %s" % (important_tags_meaningful[i],important_tags_values_type[i].__name__))
header = Header(*important_tags_values)
else: #Otherwise, all are passed to structure
header = hdr0.copy()
############### Print out in terminal ################
if SILENT == False:
print("")
print("---------------------------------------------------")
print("------------ SALAT READ HEADER part of ------------")
print("-- Solar Alma Library of Auxiliary Tools (SALAT) --")
print("")
print(' ')
print(' --------------------------------------------------')
print(' | Selected parameters from the header:')
print(' --------------------------------------------------')
print(' | Time of observations: ',hdr0['DATE-OBS'])
print(' | ALMA Band: ',hdr0['INSTRUME'])
print(' | ALMA Project ID: ',hdr0['PROPCODE'])
print(' | Solar x (arcsec) ~ ',hdr0['CRVAL1A'])
print(' | Solar y (arcsec) ~ ',hdr0['CRVAL2A'])
print(' | Pixel size (arcsec): ',hdr0['CDELT1A'])
print(' | Mean of major axis of beam (deg): ',hdr0['BMAJ'])
print(' | Mean of minor axis of beam (deg): ',hdr0['BMIN'])
        print(' | Mean of beam angle (deg): ',hdr0['BPA'])
print(' | Frequency (Hz): ',hdr0['CRVAL3'])
print(' | Water Vapour: ',hdr0['PWV'])
print(' ---------------------------------------------------')
print(' ')
return header
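# Hedged sketch (not part of SALAT): the dynamic NamedTuple construction in
# read_header() above can also be written with collections.namedtuple, which
# builds the same kind of read-only, attribute-accessible structure without
# exec(). Tag names and values here are whatever the caller extracted from the
# FITS header.
def _make_header_sketch(tag_names, tag_values):
    """Illustrative only: build a lightweight header object from parallel lists."""
    import collections
    fields = [name.replace('-', '_') for name in tag_names]  # '-' is not valid in identifiers
    HeaderTuple = collections.namedtuple('HeaderSketch', fields)
    return HeaderTuple(*tag_values)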
############################ SALAT STATS ############################
def stats(almadata,Histogram=False,SILENT=False):
"""
Name: stats
part of -- Solar Alma Library of Auxiliary Tools (SALAT) --
    Purpose: This function computes basic statistics for a data cube
Parameters
----------
almadata: np.array
Data array from user, can be 2D or 3D
Histogram: Boolean, False default
If True plots histogram
SILENT: Boolean, False Default
If True, it does not print out in terminal
Returns
-------
datastats: Dictionary
Dictionary with stats as detailed in Handbook
Examples
--------
>>> import salat
#Datacube or frame existing with name almadata
#Create Stats printing in terminal and plotting histo
>>> datastats = salat.stats(almadata,Histogram=True,)
Modification history:
---------------------
© <NAME>. (RoCS/SolarALMA), July 2021
"""
print("")
print("---------------------------------------------------")
print("--------------- SALAT STATS part of ----------------")
print("-- Solar Alma Library of Auxiliary Tools (SALAT) --")
print("")
print("Computing Statistics")
print("")
print("----------------------------------------------")
############### Computing Stats (NaNs are ignored) ################
mindata = np.nanmin(almadata) #min data
maxdata = np.nanmax(almadata) #max data
meandata = np.nanmean(almadata) #mean data
mediandata = np.nanmedian(almadata) #median data
stddata = np.nanstd(almadata) #Std data
vardata = np.nanvar(almadata) #Variance data
skewdata = float(scpstats.skew(almadata,axis=None,nan_policy='omit').data) #Skewness data Fisher-Pearson coefficient of skewness
kurtdata = scpstats.kurtosis(almadata,axis=None,nan_policy='omit') #Kurtosis data
modedata = float(scpstats.mode(almadata,axis=None,nan_policy='omit')[0]) #Mode data
percentile1 = np.nanpercentile(almadata,[1,99]) #Value range btw 1st and 99th percentile
percentile5 = np.nanpercentile(almadata,[5,95]) #Value range btw 5th and 95th percentile
############### Creating Dictionary ################
datastats = {"MIN":mindata,
"MAX":maxdata,
"MEAN":meandata,
"MEDIAN":mediandata,
"MODE":modedata,
"STD":stddata,
"VAR":vardata,
"SKEW":skewdata,
"KURT":kurtdata,
"PERCENTILE1":percentile1,
"PERCENTILE5":percentile5}
############### Print out in terminal ################
if SILENT == False:
shapedata = np.shape(almadata)
print("")
print("----------------------------------------------")
print("| Statistics: ")
print("----------------------------------------------")
if len(shapedata) == 2:
print("| Array size: x = %i y = %i"%(shapedata[1],shapedata[0]))
else:
print("| Array size: t = %i x = %i y = %i"%(shapedata[0],shapedata[2],shapedata[1]))
print("| Min = ",mindata)
print("| Max = ",maxdata)
print("| Mean = ",meandata)
print("| Median = ",mediandata)
print("| Mode = ",modedata)
print("| Standard deviation = ",stddata)
print("| Variance = ",vardata)
print("| Skew = ",skewdata)
print("| Kurtosis = ",kurtdata)
print("| Percentile 1 = ",percentile1)
print("| Percentile 5 = ",percentile5)
print("----------------------------------------------")
print("")
############### Plotting Histogram ################
if Histogram == True:
flatdata = np.hstack(np.hstack(almadata.copy()))
flatdata = flatdata[~np.isnan(flatdata)]
#Making figure
fig, ax = plt.subplots(ncols=1,nrows=1,figsize=(12,6))
        nbins = int((maxdata-mindata)/50) #Number of bins such that each bin spans ~50 K
        n, bins= np.histogram(flatdata,nbins)
n = n/n.max()
bins = bins[:-1]
ax.plot(bins, n,color='black', drawstyle='steps-mid')
ax.fill_between(bins,n,color='gray', step="mid", alpha=0.4,label = r'<T$_{med}$> = %.0f K'%(mediandata))
ax.set_title(r'Histogram',fontsize=22)
ax.set_xlabel(r'Temperature [K]',fontsize=20)
ax.set_ylabel(r'Normalised frequency',fontsize=20)
ax.legend(fontsize=20,loc=6)
ax.tick_params(axis='both', which='major', labelsize=18)
plt.tight_layout()
plt.show()
############### Return variables ################
return datastats
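# Hedged usage sketch (not part of SALAT): the dictionary returned by stats()
# can be queried directly; the key names follow the convention used above.
def _stats_usage_sketch(almadata):
    """Illustrative only: report the median and a robust value range of a cube."""
    ds = stats(almadata, Histogram=False, SILENT=True)
    low, high = ds["PERCENTILE1"]          # values at the 1st and 99th percentiles
    return ds["MEDIAN"], (low, high)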
############################ SALAT TIMELINE ############################
def timeline(timesec,gap=30):
"""
Name: timeline
part of -- Solar Alma Library of Auxiliary Tools (SALAT) --
Purpose: This function displays a timeline showing missing frames and gaps
Parameters
----------
timesec: np.array
Time array in seconds
gap: float, 30 seconds default
Time gap to consider different scans
Returns
-------
scans_idxs: Dict.
Dictionary with indexes for all scans
mfram_idxs: Dict.
        Dictionary with indexes for all consecutive sequences
Examples
-------
>>> import salat
>>> scans_idxs,mfram_idxs = salat.timeline(timesec,gap=30)
Modification history:
---------------------
© <NAME>. (RoCS/SolarALMA), July 2021
© <NAME>. (RoCS/SolarALMA), July 2021
"""
print("")
print("---------------------------------------------------")
print("------------- SALAT TIME LINE part of -------------")
print("-- Solar Alma Library of Auxiliary Tools (SALAT) --")
print("")
############### Finding Scans and storing indexes in dictionary ################
cadence = scpstats.mode(np.ediff1d(timesec))[0][0]
tidx_scans = np.where(np.ediff1d(timesec)>(gap))[0]+1 #gap is defined for scans
scans_idxs = {}
nl = len(tidx_scans)
for i in range(nl+1):
if i == 0:
scans_idxs["Sc. %i"%(i+1)] = [0,tidx_scans[i]-1]
itmp = tidx_scans[i]
elif i != 0 and i!= nl:
scans_idxs["Sc. %i"%(i+1)] = [itmp,tidx_scans[i]-1]
itmp = tidx_scans[i]
else:
scans_idxs["Sc. %i"%(i+1)] = [itmp,len(timesec)-1]
############### Finding indexes of missing frames ################
tidx_mfram = np.where(np.ediff1d(timesec)>(cadence+1))[0]+1 #gap is defined as cadence+1sec
mfram_idxs = {}
nl = len(tidx_mfram)
    #Defining consecutive sequences Sec.
for i in range(nl+1):
if i == 0:
mfram_idxs["Sec. %i"%(i+1)] = [0,tidx_mfram[i]-1]
itmp = tidx_mfram[i]
elif i | |
<gh_stars>1-10
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
from oslo_utils import uuidutils
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from zaqar_tempest_plugin.api_schema.response.v1 \
import queues as v1schema
from zaqar_tempest_plugin.api_schema.response.v1_1 \
import queues as v11schema
from zaqar_tempest_plugin.api_schema.response.v2 \
import queues as v2schema
class MessagingClient(rest_client.RestClient):
def __init__(self, auth_provider, service, region, **kwargs):
super(MessagingClient, self).__init__(
auth_provider, service, region, **kwargs)
self.version = '1'
self.uri_prefix = 'v{0}'.format(self.version)
client_id = uuidutils.generate_uuid(dashed=False)
self.headers = {'Client-ID': client_id}
class V1MessagingClient(MessagingClient):
def __init__(self, auth_provider, service, region, **kwargs):
super(V1MessagingClient, self).__init__(
auth_provider, service, region, **kwargs)
self.version = '1'
def list_queues(self):
uri = '{0}/queues'.format(self.uri_prefix)
resp, body = self.get(uri)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v1schema.list_queues, resp, body)
return resp, body
def create_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=None)
self.expected_success(201, resp.status)
return resp, body
def show_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
self.expected_success(204, resp.status)
return resp, body
def head_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.head(uri)
self.expected_success(204, resp.status)
return resp, body
def delete_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return resp, body
def show_queue_stats(self, queue_name):
uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
body = json.loads(body)
self.validate_response(v1schema.queue_stats, resp, body)
return resp, body
def show_queue_metadata(self, queue_name):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def set_queue_metadata(self, queue_name, rbody):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=json.dumps(rbody))
self.expected_success(204, resp.status)
return resp, body
def post_messages(self, queue_name, rbody):
uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(v1schema.post_messages, resp, body)
return resp, body
def list_messages(self, queue_name):
uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
queue_name)
resp, body = self.get(uri, extra_headers=True, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v1schema.list_messages, resp, body)
return resp, body
def show_single_message(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v1schema.get_single_message, resp,
body)
return resp, body
def show_multiple_messages(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v1schema.get_multiple_messages,
resp,
body)
return resp, body
def delete_messages(self, message_uri):
resp, body = self.delete(message_uri)
self.expected_success(204, resp.status)
return resp, body
def post_claims(self, queue_name, rbody, url_params=False):
uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
if url_params:
uri += '?%s' % urllib.parse.urlencode(url_params)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(v1schema.claim_messages, resp, body)
return resp, body
def query_claim(self, claim_uri):
resp, body = self.get(claim_uri)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v1schema.query_claim, resp, body)
return resp, body
def update_claim(self, claim_uri, rbody):
resp, body = self.patch(claim_uri, body=json.dumps(rbody))
self.expected_success(204, resp.status)
return resp, body
def delete_claim(self, claim_uri):
resp, body = self.delete(claim_uri)
self.expected_success(204, resp.status)
return resp, body
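# Hedged sketch (not part of the tempest plugin): a minimal queue lifecycle
# using only the V1 methods defined above. Building the client itself requires
# a tempest auth_provider/service/region and is left out here; queue_name is an
# arbitrary test queue name.
def _v1_queue_lifecycle_sketch(client, queue_name):
    """Illustrative only: create a queue, set metadata, read stats, delete it."""
    client.create_queue(queue_name)                            # expects 201
    client.set_queue_metadata(queue_name, {'owner': 'demo'})   # arbitrary JSON metadata
    _, stats_body = client.show_queue_stats(queue_name)        # schema-validated response
    client.delete_queue(queue_name)                            # expects 204
    return stats_body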
class V11MessagingClient(MessagingClient):
def __init__(self, auth_provider, service, region, **kwargs):
super(V11MessagingClient, self).__init__(
auth_provider, service, region, **kwargs)
self.version = '1.1'
self.uri_prefix = 'v{0}'.format(self.version)
def list_queues(self):
uri = '{0}/queues'.format(self.uri_prefix)
resp, body = self.get(uri, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v11schema.list_queues, resp, body)
return resp, body
def create_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=None, headers=self.headers)
self.expected_success(201, resp.status)
return resp, body
def show_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri, headers=self.headers)
self.expected_success(200, resp.status)
return resp, body
def delete_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.delete(uri, headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def show_queue_stats(self, queue_name):
uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri, headers=self.headers)
body = json.loads(body)
self.validate_response(v11schema.queue_stats, resp, body)
return resp, body
def show_queue_metadata(self, queue_name):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri, headers=self.headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def set_queue_metadata(self, queue_name, rbody):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=json.dumps(rbody),
headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def post_messages(self, queue_name, rbody):
uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(v11schema.post_messages, resp, body)
return resp, body
def list_messages(self, queue_name):
uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
queue_name)
resp, body = self.get(uri, extra_headers=True, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v11schema.list_messages, resp, body)
return resp, body
def show_single_message(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v11schema.get_single_message, resp,
body)
return resp, body
def show_multiple_messages(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '404':
body = json.loads(body)
self.validate_response(v11schema.get_multiple_messages,
resp,
body)
return resp, body
def delete_messages(self, message_uri):
resp, body = self.delete(message_uri, headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def post_claims(self, queue_name, rbody, url_params=False):
uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
if url_params:
uri += '?%s' % urllib.parse.urlencode(url_params)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(v11schema.claim_messages, resp, body)
return resp, body
def query_claim(self, claim_uri):
resp, body = self.get(claim_uri, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v11schema.query_claim, resp, body)
return resp, body
def update_claim(self, claim_uri, rbody):
resp, body = self.patch(claim_uri, body=json.dumps(rbody),
headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def delete_claim(self, claim_uri):
resp, body = self.delete(claim_uri, headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
class V2MessagingClient(MessagingClient):
def __init__(self, auth_provider, service, region, **kwargs):
super(V2MessagingClient, self).__init__(
auth_provider, service, region, **kwargs)
self.version = '2'
self.uri_prefix = 'v{0}'.format(self.version)
def list_queues(self, url_params=False):
uri = '{0}/queues'.format(self.uri_prefix)
if url_params:
uri += '?%s' % urllib.parse.urlencode(url_params)
resp, body = self.get(uri, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v2schema.list_queues, resp, body)
return resp, body
def create_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=None, headers=self.headers)
self.expected_success(201, resp.status)
return resp, body
def show_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri, headers=self.headers)
self.expected_success(200, resp.status)
return resp, body
def delete_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.delete(uri, headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def purge_queue(self, queue_name, resource=None):
uri = '{0}/queues/{1}/purge'.format(self.uri_prefix, queue_name)
rbody = {"resource_types": ["messages", "subscriptions"]}
if resource:
rbody = {"resource_types": resource}
resp, body = self.post(uri, body=json.dumps(rbody),
headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def show_queue_stats(self, queue_name):
uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri, headers=self.headers)
body = json.loads(body)
self.validate_response(v2schema.queue_stats, resp, body)
return resp, body
def show_queue_metadata(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri, headers=self.headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def set_queue_metadata(self, queue_name, rbody):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
headers = self.headers.copy()
headers['Content-Type'] =\
'application/openstack-messaging-v2.0-json-patch'
resp, body = self.patch(uri, body=json.dumps(rbody),
headers=headers)
self.expected_success(200, resp.status)
return resp, body
def post_messages(self, queue_name, rbody):
uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(v2schema.post_messages, resp, body)
return resp, body
def list_messages(self, queue_name):
uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
queue_name)
resp, body = self.get(uri, extra_headers=True, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v2schema.list_messages, resp, body)
return resp, body
def show_single_message(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v2schema.get_single_message, resp,
body)
return resp, body
def show_multiple_messages(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '404':
body = json.loads(body)
self.validate_response(v2schema.get_multiple_messages,
resp,
body)
return resp, body
def delete_messages(self, message_uri):
resp, body = self.delete(message_uri, headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def post_claims(self, queue_name, rbody, url_params=False):
uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
if url_params:
uri += '?%s' % urllib.parse.urlencode(url_params)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v2schema.claim_messages, resp, body)
return resp, body
def query_claim(self, claim_uri):
resp, body = self.get(claim_uri, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(v2schema.query_claim, resp, body)
return resp, body
def update_claim(self, claim_uri, rbody):
resp, body = self.patch(claim_uri, body=json.dumps(rbody),
headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def delete_claim(self, claim_uri):
resp, body = self.delete(claim_uri, headers=self.headers)
self.expected_success(204, resp.status)
return resp, body
def create_subscription(self, queue_name, rbody):
uri = '{0}/queues/{1}/subscriptions'.format(self.uri_prefix,
queue_name)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(v2schema.create_subscription, resp, body)
return resp, body
def delete_subscription(self, queue_name, subscription_id):
uri = '{0}/queues/{1}/subscriptions/{2}'.format(self.uri_prefix,
queue_name,
subscription_id)
resp, body = self.delete(uri, headers=self.headers)
return resp, body
def list_subscription(self, queue_name):
uri = '{0}/queues/{1}/subscriptions'.format(self.uri_prefix,
queue_name)
resp, body = self.get(uri, headers=self.headers)
body = json.loads(body)
self.validate_response(v2schema.list_subscriptions, resp, body)
return resp, body
def show_subscription(self, | |
#!/usr/bin/env python
"""This is the GRR config management code.
This handles opening and parsing of config files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import copy
import errno
import io
import logging
import os
import platform
import re
import sys
import traceback
from absl import flags
import configparser
from future.builtins import str
from future.utils import iteritems
from future.utils import itervalues
from future.utils import string_types
from future.utils import with_metaclass
from typing import cast
from typing import Text
from grr_response_core.lib import lexer
from grr_response_core.lib import package
from grr_response_core.lib import registry
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util.compat import yaml
# Default is set in distro_entry.py to be taken from package resource.
flags.DEFINE_string(
"config",
package.ResourcePath("grr-response-core",
"install_data/etc/grr-server.yaml"),
"Primary Configuration file to use. This is normally "
"taken from the installed package and should rarely "
"be specified.")
flags.DEFINE_list(
"secondary_configs", [],
"Secondary configuration files to load (These override "
"previous configuration files.).")
flags.DEFINE_bool("config_help", False, "Print help about the configuration.")
flags.DEFINE_list("context", [], "Use these contexts for the config.")
flags.DEFINE_bool("disallow_missing_config_definitions", False,
"If true, we raise an error on undefined config options.")
flags.DEFINE_multi_string(
"parameter",
default=[],
help="Global override of config values. "
"For example -p Database.implementation: MysqlDB",
short_name="p")
class Error(Exception):
"""Base class for configuration exceptions."""
class ConfigFormatError(Error, type_info.TypeValueError):
"""Raised when configuration file is formatted badly."""
class ConfigWriteError(Error):
"""Raised when we failed to update the config."""
class ConfigFileNotFound(IOError, Error):
"""Raised when a config file was expected but was not found."""
class UnknownOption(Error, KeyError):
"""Raised when an unknown option was requested."""
class InterpolationError(Error):
"""Raised when a config object failed to interpolate."""
class FilterError(InterpolationError):
"""Raised when a filter fails to perform its function."""
class ConstModificationError(Error):
"""Raised when the config tries to change a constant option."""
class AlreadyInitializedError(Error):
"""Raised when an option is defined after initialization."""
class MissingConfigDefinitionError(Error):
"""Raised when a config contains an undefined config option."""
class InvalidContextError(Error):
"""Raised when an invalid context is used."""
def SetPlatformArchContext():
"""Add the running contexts to the config system."""
# Initialize the running platform context:
_CONFIG.AddContext("Platform:%s" % platform.system().title())
machine = platform.uname()[4]
if machine in ["x86_64", "AMD64", "i686"]:
# 32 bit binaries running on AMD64 will still have a i386 arch.
if platform.architecture()[0] == "32bit":
arch = "i386"
else:
arch = "amd64"
elif machine == "x86":
arch = "i386"
else:
arch = machine
_CONFIG.AddContext("Arch:%s" % arch)
class ConfigFilter(with_metaclass(registry.MetaclassRegistry, object)):
"""A configuration filter can transform a configuration parameter."""
name = "identity"
# If this is set, application of the filter will not be logged - useful
# for key material.
sensitive_arg = False
def Filter(self, data):
precondition.AssertType(data, Text)
return data
class Literal(ConfigFilter):
"""A filter which does not interpolate."""
name = "literal"
class Lower(ConfigFilter):
name = "lower"
def Filter(self, data):
precondition.AssertType(data, Text)
return data.lower()
class Upper(ConfigFilter):
name = "upper"
def Filter(self, data):
precondition.AssertType(data, Text)
return data.upper()
class Filename(ConfigFilter):
name = "file"
def Filter(self, data):
precondition.AssertType(data, Text)
try:
with io.open(data, "r") as fd:
return fd.read() # pytype: disable=bad-return-type
except IOError as e:
raise FilterError("%s: %s" % (data, e))
class OptionalFile(ConfigFilter):
name = "optionalfile"
def Filter(self, data):
precondition.AssertType(data, Text)
try:
with io.open(data, "r") as fd:
return fd.read() # pytype: disable=bad-return-type
except IOError:
return ""
class FixPathSeparator(ConfigFilter):
"""A configuration filter that fixes the path speratator."""
name = "fixpathsep"
def Filter(self, data):
precondition.AssertType(data, Text)
if platform.system() == "Windows":
# This will fix "X:\", and might add extra slashes to other paths, but
# this is OK.
return data.replace("\\", "\\\\")
else:
return data.replace("\\", "/")
class Env(ConfigFilter):
"""Interpolate environment variables."""
name = "env"
def Filter(self, data):
precondition.AssertType(data, Text)
return compatibility.Environ(data.upper(), default="")
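# Hedged sketch (not part of GRR): a user-defined filter only needs a unique
# `name` and a Filter() method; registration happens automatically through the
# ConfigFilter metaclass, exactly as for the built-in filters above.
class Basename(ConfigFilter):
  """Illustrative only: reduce a path-valued option to its final component."""
  name = "basename"

  def Filter(self, data):
    precondition.AssertType(data, Text)
    return os.path.basename(data)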
class Expand(ConfigFilter):
"""Expands the input as a configuration parameter."""
name = "expand"
def Filter(self, data):
precondition.AssertType(data, Text)
interpolated = _CONFIG.InterpolateValue(data)
# TODO(hanuszczak): This assertion should not be necessary but since the
# whole configuration system is one gigantic spaghetti, we can never be sure
# what is being returned.
    precondition.AssertType(interpolated, Text)
return cast(Text, interpolated)
class Flags(ConfigFilter):
"""Get the parameter from the flags."""
name = "flags"
def Filter(self, data):
precondition.AssertType(data, Text)
try:
logging.debug("Overriding config option with flags.FLAGS.%s", data)
attribute = getattr(flags.FLAGS, data)
# TODO(hanuszczak): Filters should always return strings and this juggling
# should not be needed. This is just a quick hack to fix prod.
if isinstance(attribute, bytes):
attribute = attribute.decode("utf-8")
elif not isinstance(attribute, Text):
attribute = str(attribute)
# TODO(hanuszczak): See TODO comment in the `Expand` filter.
precondition.AssertType(attribute, Text)
return cast(Text, attribute)
except AttributeError as e:
raise FilterError(e)
class Resource(ConfigFilter):
"""Locates a GRR resource that is shipped with the GRR package.
The format of the directive is "path/to/resource@package_name". If
  package_name is not provided we use grr-response-core by default.
"""
name = "resource"
default_package = "grr-response-core"
def Filter(self, filename_spec):
"""Use pkg_resources to find the path to the required resource."""
if "@" in filename_spec:
file_path, package_name = filename_spec.split("@")
else:
file_path, package_name = filename_spec, Resource.default_package
resource_path = package.ResourcePath(package_name, file_path)
if resource_path is not None:
return resource_path
# pylint: disable=unreachable
raise FilterError(
"Unable to find resource %s while interpolating: " % filename_spec)
# pylint: enable=unreachable
class ModulePath(ConfigFilter):
"""Locate the path to the specified module.
Note: A module is either a python file (with a .py extension) or a directory
with a __init__.py inside it. It is not the same as a resource (See Resource
above) since a module will be installed somewhere you can import it from.
  Caveat: This will raise if the module is not physically present on disk
(e.g. pyinstaller bundle).
"""
name = "module_path"
def Filter(self, name):
try:
return package.ModulePath(name)
except ImportError:
message = (
"Config parameter module_path expansion %r can not be imported." %
name)
# This exception will typically be caught by the expansion engine and
# be silently swallowed.
traceback.print_exc()
logging.error(message)
raise FilterError(message)
class GRRConfigParser(with_metaclass(registry.MetaclassRegistry, object)):
"""The base class for all GRR configuration parsers."""
# Configuration parsers are named. This name is used to select the correct
# parser from the --config parameter which is interpreted as a filename,
# except for files of the form reg://XXXX where XXXX is the key name.
name = None
# Set to True by the parsers if the file exists.
parsed = None
def SaveData(self, raw_data):
raise NotImplementedError()
def SaveDataToFD(self, raw_data, fd):
raise NotImplementedError()
def RawData(self):
"""Convert the file to a more suitable data structure.
Returns:
The standard data format from this method is for example:
{
name: default_value;
name2: default_value2;
"Context1": {
name: value,
name2: value,
"Nested Context" : {
name: value;
};
},
"Context2": {
name: value,
}
}
i.e. raw_data is an OrderedDict() with keys representing parameter names
and values representing values. Contexts are represented by nested
OrderedDict() structures with similar format.
Note that support for contexts is optional and depends on the config file
format. If contexts are not supported, a flat OrderedDict() is returned.
"""
class ConfigFileParser(configparser.RawConfigParser, GRRConfigParser):
"""A parser for ini style config files."""
def __init__(self, filename=None, data=None, fd=None):
super(ConfigFileParser, self).__init__()
self.optionxform = str
if fd:
self.parsed = self.read_file(fd)
self.filename = filename or fd.name
elif filename:
self.parsed = self.read(filename)
self.filename = filename
elif data is not None:
fd = io.StringIO(data)
# TODO(hanuszczak): Incorrect typings (`StringIO` is `IO`).
self.parsed = self.read_file(fd) # pytype: disable=wrong-arg-types
self.filename = filename
else:
raise Error("Filename not specified.")
def __str__(self):
return "<%s filename=\"%s\">" % (self.__class__.__name__, self.filename)
def SaveData(self, raw_data):
"""Store the raw data as our configuration."""
if self.filename is None:
raise IOError("Unknown filename")
logging.info("Writing back configuration to file %s", self.filename)
# Ensure intermediate directories exist
try:
os.makedirs(os.path.dirname(self.filename))
except (IOError, OSError):
pass
try:
# We can not use the standard open() call because we need to
# enforce restrictive file permissions on the created file.
mode = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
fd = os.open(self.filename, mode, 0o600)
with os.fdopen(fd, "w") as config_file:
self.SaveDataToFD(raw_data, config_file)
except OSError as e:
logging.warning("Unable to write config file %s: %s.", self.filename, e)
def SaveDataToFD(self, raw_data, fd):
"""Merge the raw data with the config file and store it."""
for key, value in iteritems(raw_data):
# TODO(hanuszczak): Incorrect type specification for `set`.
# pytype: disable=wrong-arg-types
self.set("", key, value=value)
# pytype: enable=wrong-arg-types
self.write(fd)
def RawData(self):
raw_data = collections.OrderedDict()
for section in self.sections():
for key, value in self.items(section):
raw_data[".".join([section, key])] = value
return raw_data
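# Hedged sketch (not part of GRR): ConfigFileParser.RawData() flattens INI
# sections into dotted keys, so the two-line file below yields
# OrderedDict([("Logging.verbose", "True")]).
def _ini_rawdata_sketch():
  parser = ConfigFileParser(data="[Logging]\nverbose = True\n")
  return parser.RawData()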
class YamlParser(GRRConfigParser):
"""A parser for yaml style config files."""
name = "yaml"
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import warnings
from functools import lru_cache
from numbers import Number
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import torch
from numpy.core.multiarray import normalize_axis_index
from PIL import Image
# ========================================================================= #
# Type Hints #
# ========================================================================= #
# from torch.testing._internal.common_utils import numpy_to_torch_dtype_dict
_NP_TO_TORCH_DTYPE = {
np.dtype('bool'): torch.bool,
np.dtype('uint8'): torch.uint8,
np.dtype('int8'): torch.int8,
np.dtype('int16'): torch.int16,
np.dtype('int32'): torch.int32,
np.dtype('int64'): torch.int64,
np.dtype('float16'): torch.float16,
np.dtype('float32'): torch.float32,
np.dtype('float64'): torch.float64,
np.dtype('complex64'): torch.complex64,
np.dtype('complex128'): torch.complex128
}
MinMaxHint = Union[Number, Tuple[Number, ...], np.ndarray]
@lru_cache()
def _dtype_min_max(dtype: torch.dtype) -> Tuple[Union[float, int], Union[float, int]]:
"""Get the min and max values for a dtype"""
dinfo = torch.finfo(dtype) if dtype.is_floating_point else torch.iinfo(dtype)
return dinfo.min, dinfo.max
@lru_cache()
def _check_image_dtype(dtype: torch.dtype):
"""Check that a dtype can hold image values"""
# check that the datatype is within the right range -- this is not actually necessary if the below is correct!
dmin, dmax = _dtype_min_max(dtype)
imin, imax = (0, 1) if dtype.is_floating_point else (0, 255)
assert (dmin <= imin) and (imax <= dmax), f'The dtype: {repr(dtype)} with range [{dmin}, {dmax}] cannot store image values in the range [{imin}, {imax}]'
# check the datatype is allowed
if dtype not in _ALLOWED_DTYPES:
raise TypeError(f'The dtype: {repr(dtype)} is not allowed, must be one of: {list(_ALLOWED_DTYPES)}')
# return the min and max values
return imin, imax
# ========================================================================= #
# Image Helper Functions #
# ========================================================================= #
def torch_image_has_valid_range(tensor: torch.Tensor, check_mode: Optional[str] = None) -> bool:
"""
Check that the range of values in the image is correct!
"""
if check_mode not in {'error', 'warn', 'bool', None}:
raise KeyError(f'invalid check_mode: {repr(check_mode)}')
# get the range for the dtype
imin, imax = _check_image_dtype(tensor.dtype)
# get the values
m = tensor.amin().cpu().numpy()
M = tensor.amax().cpu().numpy()
if (m < imin) or (imax < M):
if check_mode == 'error':
raise ValueError(f'images value range: [{m}, {M}] is outside of the required range: [{imin}, {imax}], for dtype: {repr(tensor.dtype)}')
elif check_mode == 'warn':
warnings.warn(f'images value range: [{m}, {M}] is outside of the required range: [{imin}, {imax}], for dtype: {repr(tensor.dtype)}')
return False
return True
@torch.no_grad()
def torch_image_clamp(tensor: torch.Tensor, clamp_mode: str = 'warn') -> torch.Tensor:
"""
Clamp the image based on the dtype
Valid `clamp_mode`s are {'warn', 'error', 'clamp'}
"""
# check range of values
if clamp_mode in ('warn', 'error'):
torch_image_has_valid_range(tensor, check_mode=clamp_mode)
elif clamp_mode != 'clamp':
raise KeyError(f'invalid clamp mode: {repr(clamp_mode)}')
# get the range for the dtype
imin, imax = _check_image_dtype(tensor.dtype)
# clamp!
return torch.clamp(tensor, imin, imax)
@torch.no_grad()
def torch_image_to_dtype(tensor: torch.Tensor, out_dtype: torch.dtype):
"""
Convert an image to the specified dtype
- Scaling is automatically performed based on the input and output dtype
Floats should be in the range [0, 1], integers should be in the range [0, 255]
    - if precision would be lost, the values are clamped!
"""
_check_image_dtype(tensor.dtype)
_check_image_dtype(out_dtype)
# check scale
torch_image_has_valid_range(tensor, check_mode='error')
# convert
if tensor.dtype.is_floating_point and (not out_dtype.is_floating_point):
# [float -> int] -- cast after scaling
return torch.clamp(tensor * 255, 0, 255).to(out_dtype)
elif (not tensor.dtype.is_floating_point) and out_dtype.is_floating_point:
# [int -> float] -- cast before scaling
return torch.clamp(tensor.to(out_dtype) / 255, 0, 1)
else:
# [int -> int] | [float -> float]
return tensor.to(out_dtype)
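# Hedged usage sketch (not part of this module): float images in [0, 1] are
# scaled by 255 and clamped when cast to uint8, and scaled back on the return
# trip, as implemented in torch_image_to_dtype() above.
def _to_uint8_sketch():
    """Illustrative only: round-trip a random float image through uint8."""
    img = torch.rand(3, 32, 32)                        # float32 values in [0, 1)
    img_u8 = torch_image_to_dtype(img, torch.uint8)    # integer values in [0, 255]
    img_f32 = torch_image_to_dtype(img_u8, torch.float32)
    return img_u8.dtype, float(img_f32.max())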
@torch.no_grad()
def torch_image_normalize_channels(
tensor: torch.Tensor,
in_min: MinMaxHint,
in_max: MinMaxHint,
channel_dim: int = -1,
out_dtype: Optional[torch.dtype] = None
):
if out_dtype is None:
out_dtype = tensor.dtype
# check dtypes
_check_image_dtype(out_dtype)
assert out_dtype.is_floating_point, f'out_dtype must be a floating point, got: {repr(out_dtype)}'
# get norm values padded to the dimension of the channel
in_min, in_max = _torch_channel_broadcast_scale_values(in_min, in_max, in_dtype=tensor.dtype, dim=channel_dim, ndim=tensor.ndim)
# convert
tensor = tensor.to(out_dtype)
in_min = torch.as_tensor(in_min, dtype=tensor.dtype, device=tensor.device)
in_max = torch.as_tensor(in_max, dtype=tensor.dtype, device=tensor.device)
# warn if the values are the same
if torch.any(in_min == in_max):
m = in_min.cpu().detach().numpy()
        M = in_max.cpu().detach().numpy()
warnings.warn(f'minimum: {m} and maximum: {M} values are the same, scaling values to zero.')
# handle equal values
divisor = in_max - in_min
divisor[divisor == 0] = 1
# normalize
return (tensor - in_min) / divisor
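# Hedged usage sketch (not part of this module): rescaling a batch with a known
# input range to [0, 1]; channel_dim=-3 matches a (..., C, H, W) layout. The
# shapes and the [0, 10) value range below are made up for illustration.
def _normalize_channels_sketch():
    """Illustrative only: map values from [0, 10) into [0, 1]."""
    x = torch.rand(4, 3, 8, 8) * 10.0
    y = torch_image_normalize_channels(x, in_min=0.0, in_max=10.0, channel_dim=-3)
    return float(y.min()), float(y.max())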
# ========================================================================= #
# Argument Helper #
# ========================================================================= #
# float16 doesn't always work, rather convert to float32 first
_ALLOWED_DTYPES = {
torch.float32, torch.float64,
torch.uint8,
torch.int, torch.int16, torch.int32, torch.int64,
torch.long,
}
@lru_cache()
def _torch_to_images_normalise_args(in_tensor_shape: Tuple[int, ...], in_tensor_dtype: torch.dtype, in_dims: str, out_dims: str, in_dtype: Optional[torch.dtype], out_dtype: Optional[torch.dtype]):
# check types
if not isinstance(in_dims, str): raise TypeError(f'in_dims must be of type: {str}, but got: {type(in_dims)}')
if not isinstance(out_dims, str): raise TypeError(f'out_dims must be of type: {str}, but got: {type(out_dims)}')
# normalise dim names
in_dims = in_dims.upper()
out_dims = out_dims.upper()
# check dim values
if sorted(in_dims) != sorted('CHW'): raise KeyError(f'in_dims contains the symbols: {repr(in_dims)}, must contain only permutations of: {repr("CHW")}')
if sorted(out_dims) != sorted('CHW'): raise KeyError(f'out_dims contains the symbols: {repr(out_dims)}, must contain only permutations of: {repr("CHW")}')
# get dimension indices
in_c_dim = in_dims.index('C') - len(in_dims)
out_c_dim = out_dims.index('C') - len(out_dims)
transpose_indices = tuple(in_dims.index(c) - len(in_dims) for c in out_dims)
# check image tensor
if len(in_tensor_shape) < 3:
raise ValueError(f'images must have 3 or more dimensions corresponding to: (..., {", ".join(in_dims)}), but got shape: {in_tensor_shape}')
if in_tensor_shape[in_c_dim] not in (1, 3):
raise ValueError(f'images do not have the correct number of channels for dim "C", required: 1 or 3. Input format is (..., {", ".join(in_dims)}), but got shape: {in_tensor_shape}')
# get default values
if in_dtype is None: in_dtype = in_tensor_dtype
if out_dtype is None: out_dtype = in_dtype
# check dtypes allowed
if in_dtype not in _ALLOWED_DTYPES: raise TypeError(f'in_dtype is not allowed, got: {repr(in_dtype)} must be one of: {list(_ALLOWED_DTYPES)}')
if out_dtype not in _ALLOWED_DTYPES: raise TypeError(f'out_dtype is not allowed, got: {repr(out_dtype)} must be one of: {list(_ALLOWED_DTYPES)}')
# done!
return transpose_indices, in_dtype, out_dtype, out_c_dim
def _torch_channel_broadcast_scale_values(
in_min: MinMaxHint,
in_max: MinMaxHint,
in_dtype: torch.dtype,
dim: int,
ndim: int,
) -> Tuple[List[Number], List[Number]]:
return __torch_channel_broadcast_scale_values(
in_min=tuple(np.array(in_min).reshape(-1).tolist()), # TODO: this is slow?
in_max=tuple(np.array(in_max).reshape(-1).tolist()), # TODO: this is slow?
in_dtype=in_dtype,
dim=dim,
ndim=ndim,
)
@lru_cache()
@torch.no_grad()
def __torch_channel_broadcast_scale_values(
in_min: MinMaxHint,
in_max: MinMaxHint,
in_dtype: torch.dtype,
dim: int,
ndim: int,
) -> Tuple[List[Number], List[Number]]:
# get the default values
in_min: np.ndarray = np.array((0.0 if in_dtype.is_floating_point else 0.0) if (in_min is None) else in_min)
in_max: np.ndarray = np.array((1.0 if in_dtype.is_floating_point else 255.0) if (in_max is None) else in_max)
# add missing axes
if in_min.ndim == 0: in_min = in_min[None]
if in_max.ndim == 0: in_max = in_max[None]
# checks
assert in_min.ndim == 1
assert in_max.ndim == 1
assert np.all(in_min <= in_max), f'min values are not <= the max values: {in_min} !<= {in_max}'
# normalize dim
dim = normalize_axis_index(dim, ndim=ndim)
# pad dim
r_pad = ndim - (dim + 1)
if r_pad > 0:
in_min = in_min[(...,) + ((None,)*r_pad)]
in_max = in_max[(...,) + ((None,)*r_pad)]
# done!
return in_min.tolist(), in_max.tolist()
# ========================================================================= #
# Image Conversion #
# ========================================================================= #
@torch.no_grad()
def torch_to_images(
tensor: torch.Tensor,
in_dims: str = 'CHW', # we always treat numpy by default as HWC, and torch.Tensor as CHW
out_dims: str = 'HWC',
in_dtype: Optional[torch.dtype] = None,
out_dtype: Optional[torch.dtype] = torch.uint8,
clamp_mode: str = 'warn', # clamp, warn, error
always_rgb: bool = False,
in_min: Optional[MinMaxHint] = None,
in_max: Optional[MinMaxHint] = None,
to_numpy: bool = False,
) -> Union[torch.Tensor, np.ndarray]:
"""
Convert a batch of image-like tensors to images.
    A batch in this case consists of an arbitrary
"""Test functions based on illustrations and exercises from
the book (in progress):
<NAME>, Structured Probabilistic Reasoning
Draft available from:
http://www.cs.ru.nl/B.Jacobs/PAPERS/ProbabilisticReasoning.pdf
Instruction: (install and) run "pytest -v" on this file.
One can also copy code fragments from this file for your own experiments
and variations.
"""
from efprob import *
##############################################################
#
# Chapter 1. Collections and Channels
#
##############################################################
def test_wet_grass():
#
# Spaces
#
A = Space("A", ['a', '~a'])
B = Space("B", ['b', '~b'])
C = Space("C", ['c', '~c'])
D = Space("D", ['d', '~d'])
E = Space("E", ['e', '~e'])
#
# State and channels
#
wi = flip(0.6,A)
sp = chan_fromstates([flip(0.2,B),flip(0.75,B)], A)
ra = chan_fromstates([flip(0.8,C),flip(0.1,C)], A)
wg = chan_fromstates([flip(0.95,D),flip(0.9,D),flip(0.8,D),flip(0,D)],B @ C)
sr = chan_fromstates([flip(0.7,E),flip(0,E)], C)
#
# Predictions about Bayesian network
#
assert (sp >> wi) == State([21/50, 29/50],B)
assert (ra >> wi) == State([13/25, 12/25],C)
assert (sr * ra >> wi) == State([91/250, 159/250],E)
assert (wg >> ((sp @ ra) >> (copy(A) >> wi))) \
== State([1399/2000, 601/2000],D)
#
# Exercise
#
assert ((wg @ sr) >> ((idn(B) @ copy(C)) >> ((sp @ ra) >> (copy(A) >> wi)))) \
== State([30443/100000, 39507/100000, 5957/100000, 24093/100000], D @ E)
#
# Wetness inferences (from Directed Models chapter)
#
joint = ((idn(A) @ idn(B) @ wg @ idn(C) @ sr) \
* (idn(A) @ copy(B) @ copy(C,3)) \
* (idn(A) @ sp @ ra) \
* copy(A,3)) >> wi
sr_pred = point_pred('e', E)
#
# Sprinkler probability, given a slippery road
#
assert (sp >> (wi / (ra << (sr << sr_pred)))) \
== State([63/260, 197/260],B)
assert (joint / (truth(A @ B @ D @ C) @ sr_pred)).MM(0,1,0,0,0) \
== State([63/260, 197/260],B)
#
# Wet grass probability, given a slippery road
#
assert (wg >> (((sp @ ra) >> (copy(A) >> wi)) / (truth(B) @ (sr << sr_pred)))) \
== State([4349/5200, 851/5200], D)
assert (joint / (truth(A @ B @ D @ C) @ sr_pred)).MM(0,0,1,0,0) \
== State([4349/5200, 851/5200], D)
##############################################################
#
# Chapter 2. Predicates and Observables
#
##############################################################
def test_validity():
#
# Coin toss with win/loose reward and expected outcome
#
s = flip(0.3)
rv = Predicate([100, -50], bool_sp)
assert (s >= rv) == -5
#
# Expected sum of two/three dices
#
two_sum = pred_fromfun(lambda x,y: x+y, pips_sp @ pips_sp)
assert np.isclose(dice @ dice >= two_sum, 7)
three_sum = pred_fromfun(lambda x,y,z: x+y+z, pips_sp ** 3)
assert np.isclose(dice @ dice @ dice >= three_sum, 21/2)
def test_evenish():
#
# Validity and conditioning for a dice with a fuzzy predicate
#
evenish = Predicate([1/5, 9/10, 1/10, 9/10, 1/10, 4/5], pips_sp)
assert (dice >= evenish) == 1/2
assert (dice / evenish) == State([1/15, 3/10, 1/30, 3/10, 1/30, 4/15],
pips_sp)
def test_burglary_alarm():
    #
    # A burglary-alarm example due to Pearl, with crossover influence
#
A = Space("A", ['a','~a'])
B = Space("B", ['b','~b'])
w = State([0.000095,0.009999,0.000005,0.989901], A @ B)
assert w.MM(0,1) == State([0.0001, 0.9999], B)
p = Predicate([0.8, 0.2], A)
assert np.isclose((w >= (p @ truth(B))), 0.206056)
assert (w / (p @ truth(B))) \
== State([0.00036883,
0.03882043,
4.85e-06,
0.9608], A @ B)
assert (w / (p @ truth(B))).MM(0,1) == State([0.00037368,
0.99962], B)
def test_two_urns_draw():
#
# Two urns with white and black balls, and a draw from either of them.
#
B = Space("Balls", ['W', 'B'])
c = chan_fromstates([State([2/9,11/9],B), State([5/11,6/11],B)], coin_sp)
assert (coin(0.5) / (c << point_pred('W',B))) \
== State([22/67, 45/67], coin_sp)
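# Hedged sketch (not from the book): the two-urn posterior above, recomputed as
# plain Bayes arithmetic without the efprob machinery. The white-ball
# likelihoods 2/9 and 5/11 are the ones encoded in the channel c above.
def _two_urns_by_hand():
    """Illustrative only: posterior over urns after drawing a white ball."""
    prior = [1/2, 1/2]
    likelihood_white = [2/9, 5/11]
    joint = [p * l for p, l in zip(prior, likelihood_white)]
    total = sum(joint)
    return [j / total for j in joint]   # == [22/67, 45/67]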
def test_taxi_cab():
#
# Example from Tverski-Kahneman'82, with base rate neglect
#
C = Space("Colours", ['G', 'B'])
w = flip(0.85, C)
c = chan_fromstates([flip(0.8, C), flip(0.2,C)], C)
assert (w / (c << point_pred('B', C))) == State([17/29, 12/29], C)
def test_blood_medicine():
B = Space(None, ['H', 'L'])
M = range_sp(3)
b = chan_fromstates([flip(2/3,B), flip(7/9,B), flip(5/8,B)], M)
prior = State([3/20, 9/20, 2/5], M)
assert (b >> prior) == State([7/10, 3/10], B)
p12 = Predicate([0,1,1], M)
assert (b >> (prior / p12)) == State([12/17, 5/17], B)
q1 = Predicate([1,0], B)
print( b << q1 )
print( prior >= b << q1 )
assert (prior / (b << q1)) == State([1/7, 1/2, 5/14], M)
q2 = Predicate([0.95,0.05], B)
assert (prior / (b << q2)) == State([0.143382, 0.49632, 0.360294], M)
def five_candy_bag():
#
    # Example from Russell-Norvig, 20.1, about successive lime candy
# draws and updating of the distribution of candy bags
#
H = Space("Bags", [1,2,3,4,5])
C = Space("Candies", ['C', 'L'])
pC = point_pred('C',C)
pL = point_pred('L',C)
h = chan_fromstates([flip(1, C),
flip(3/4, C),
flip(1/2, C),
flip(1/4, C),
flip(0, C)], H)
prior = State([1/10, 1/5, 2/5, 1/5, 1/10], H)
assert ( (prior / pL) == State([0, 1/10, 2/5, 3/10, 2/5], H) )
assert ( (prior / pL ** 2) == State([0, 1/26, 4/13, 9/26, 4/13], H) )
assert ( (prior / pL ** 3) == State([0, 1/76, 4/19, 17/76, 8/19], H) )
def test_capture_recapture():
N = 20
fish_sp = Space(None, [10 * i for i in range(2, 31)])
prior = uniform_state(fish_sp)
chan = chan_fromklmap(lambda d: binomial(N)(N/d), fish_sp, range_sp(N+1))
#(chan >> prior).plot()
posterior = prior / (chan << point_pred(5, range_sp(N+1)))
#
# Expected number after catching 5 marked
#
assert np.isclose(posterior.expectation(),
116.491)
#
# Expected number after catching 10 marked
#
assert np.isclose((prior /
(chan << point_pred(10, range_sp(N+1)))).expectation(),
47.481)
#posterior.plot()
def test_disease_test():
disease_sp = Space(None, ['d', '~d'])
prior = flip(1/100, disease_sp)
test_sp = Space(None, ['p', 'n'])
test_pred = Predicate([1,0], test_sp)
sensitivity = chan_fromstates([flip(9/10,test_sp), flip(1/20,test_sp)],
disease_sp)
#
# Prediction
#
assert (sensitivity >> prior) == State([117/2000, 1883/2000], test_sp)
#
# Explanation
#
assert (prior / (sensitivity << test_pred)) \
== State([18/117, 99/117], disease_sp)
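# Hedged sketch (not from the book): the disease-test prediction and explanation
# above, recomputed by hand from prior 1/100, sensitivity 9/10 and false-positive
# rate 1/20.
def _disease_test_by_hand():
    """Illustrative only: P(positive) and P(d | positive)."""
    p_pos = (1/100) * (9/10) + (99/100) * (1/20)   # = 117/2000, the prediction
    p_d_given_pos = (1/100) * (9/10) / p_pos       # = 18/117, the explanation
    return p_pos, p_d_given_pos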
def test_resignations():
#
# Exercise
#
S = Space("Store", ['A', 'B', 'C'])
G = Space("Gender", ['M', 'F'])
w = State([50/225, 75/225, 100/225], S)
c = chan_fromstates([State([0.5,0.5],G),
State([0.4,0.6],G),
State([0.3,0.7],G)], S)
assert (w / (c << point_pred('F',G))) == State([5/28, 9/28, 14/28], S)
def test_coin_parameter():
N = 100
prior = discretized_uniform(0, 1, N)
chan = chan_fromklmap(lambda r: flip(r), prior.sp, bool_sp)
assert (chan >> prior) == State([1/2, 1/2], bool_sp)
observations = [0,1,1,1,0,0,1,1]
s = prior
#s.plot(10)
for ob in observations:
pred = yes_pred if ob==1 else no_pred
s = s / (chan << pred)
#s.plot(10)
#
# learned coin
#
assert (chan >> s) == State([3/5, 2/5], bool_sp)
#
# Expected value
#
assert s.expectation() == 0.599999985316273
def test_asia_visit():
A = Space("asia", ['a', '~a'])
S = Space("smoking", ['s', '~s'])
T = Space("tuberculosis", ['t', '~t'])
E = Space("either", ['e', '~e'])
L = Space("cancer", ['l', '~l'])
X = Space("xray", ['x', '~x'])
D = Space("dyspnoea", ['d', '~d'])
B = Space("bronchitis", ['b', '~b'])
#
# Initial states
#
asia = flip(0.01, A)
smoking = flip(0.5, S)
#
# Channels
#
tub = chan_fromstates([flip(0.05,T), flip(0.01,T)], A)
either = chan_fromstates([flip(1,E), flip(1,E), flip(1,E), flip(0,E)],
L @ T)
lung = chan_fromstates([flip(0.1,L), flip(0.01,L)], S)
xray = chan_fromstates([flip(0.98,X), flip(0.05,X)], E)
dysp = chan_fromstates([flip(0.9,D), flip(0.7,D), flip(0.8,D), flip(0.1,D)],
B @ E)
bronc = chan_fromstates([flip(0.6,B), flip(0.3,B)], S)
#
# Add wires from internal nodes to the outside, so that the 8 outgoing
# wires are respectively:
# 1. smoking
# 2. broncchitis
# 3. lung
# 4. dyspnoea
# 5. either
# 6. xray
# 7. tuberculosis
# 8. asia
#
asia_joint = ((idn(S @ B @ L) @ dysp @ idn(E) @ xray @ idn(T @ A)) \
* (idn(S @ B) @ swap(B, L) @ copy(E,3) @ idn(T @ A)) \
* (idn(S @ B @ B @ L) @ either @ idn(T @ A)) \
* (idn(S) @ copy(B) @ copy(L) @ copy(T) @ idn(A)) \
* (idn(S) @ bronc @ lung @ tub @ idn(A)) \
* (copy(S,3) @ copy(A))) \
>> smoking @ asia
#
# Likelihood of lung cancer, given no bronchitis
#
B_pred = point_pred('b', B)
assert (lung >> (smoking / (bronc << ~B_pred))) \
== State([0.042727, 0.95727], L)
assert ((asia_joint/(truth(S) @ ~B_pred @ truth(L @ D @ E @ X @ T @ A))). \
MM(0,0,1,0,0,0,0,0)) == State([0.042727, 0.95727], L)
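# By hand: conditioning smoking on "no bronchitis" gives
# [0.5*0.4, 0.5*0.7] / 0.55 = [4/11, 7/11]; pushing this through the lung
# channel yields P(cancer) = (4*0.1 + 7*0.01) / 11 = 0.47/11 ~ 0.0427,
# matching the 0.042727 above.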
#
# likelihood of smoking, given positive xray, computed in many
# different ways
#
X_pred = point_pred('x', X)
| |
# "Grinch Finger Wallpaper": "",
# "Naughty Present": "",
# "Baby Grinch's Mischief Whistle": "",
# "Ice Queen Trophy": "",
# "Santa-NK1 Trophy": "",
# "Everscream Trophy": "",
# "Music Box (Pumpkin Moon)": "",
# "Music Box (Alt Underground)": "",
# "Music Box (Frost Moon)": "",
# "Brown Paint": "",
# "Shadow Paint": "",
# "Negative Paint": "",
# "Team Dye": "",
# "Amethyst Gemspark Block": "",
# "Topaz Gemspark Block": "",
# "Sapphire Gemspark Block": "",
# "Emerald Gemspark Block": "",
# "Ruby Gemspark Block": "",
# "Diamond Gemspark Block": "",
# "Amber Gemspark Block": "",
# "Life Hair Dye": "",
# "Mana Hair Dye": "",
# "Depth Hair Dye": "",
# "Money Hair Dye": "",
# "Time Hair Dye": "",
# "Team Hair Dye": "",
# "Biome Hair Dye": "",
# "Party Hair Dye": "",
# "Rainbow Hair Dye": "",
# "Speed Hair Dye": "",
# "Angel Halo": "",
# "Fez": "",
# "Womannequin": "",
# "Hair Dye Remover": "",
# "Bug Net": "",
# "Firefly": "",
# "Firefly in a Bottle": "",
# "Monarch Butterfly": "",
# "Purple Emperor Butterfly": "",
# "Red Admiral Butterfly": "",
# "Ulysses Butterfly": "",
# "Sulphur Butterfly": "",
# "Tree Nymph Butterfly": "",
# "Zebra Swallowtail Butterfly": "",
# "Julia Butterfly": "",
# "Worm": "",
# "Mouse": "",
# "Lightning Bug": "",
# "Lightning Bug in a Bottle": "",
# "Snail": "",
# "Glowing Snail": "",
# "Fancy Gray Wallpaper": "",
# "Ice Floe Wallpaper": "",
# "Music Wallpaper": "",
# "Purple Rain Wallpaper": "",
# "Rainbow Wallpaper": "",
# "Sparkle Stone Wallpaper": "",
# "Starlit Heaven Wallpaper": "",
# "Bird": "",
# "Blue Jay": "",
# "Cardinal": "",
# "Squirrel": "",
# "Bunny": "",
# "Cactus Bookcase": "",
# "Ebonwood Bookcase": "",
# "Flesh Bookcase": "",
# "Honey Bookcase": "",
# "Steampunk Bookcase": "",
# "Glass Bookcase": "",
# "Rich Mahogany Bookcase": "",
# "Pearlwood Bookcase": "",
# "Spooky Bookcase": "",
# "Skyware Bookcase": "",
# "Lihzahrd Bookcase": "",
# "Frozen Bookcase": "",
# "Cactus Lantern": "",
# "Ebonwood Lantern": "",
# "Flesh Lantern": "",
# "Honey Lantern": "",
# "Steampunk Lantern": "",
# "Glass Lantern": "",
# "<NAME> Lantern": "",
# "Pearlwood Lantern": "",
# "Frozen Lantern": "",
# "Lihzahrd Lantern": "",
# "Skyware Lantern": "",
# "Spooky Lantern": "",
# "Frozen Door": "",
# "Cactus Candle": "",
# "Ebonwood Candle": "",
# "Flesh Candle": "",
# "Glass Candle": "",
# "Frozen Candle": "",
# "<NAME> Candle": "",
# "Pearlwood Candle": "",
# "Lihzahrd Candle": "",
# "Skyware Candle": "",
# "Pumpkin Candle": "",
# "Cactus Chandelier": "",
# "Ebonwood Chandelier": "",
# "Flesh Chandelier": "",
# "Honey Chandelier": "",
# "Frozen Chandelier": "",
# "<NAME> Chandelier": "",
# "Pearlwood Chandelier": "",
# "Lihzahrd Chandelier": "",
# "Skyware Chandelier": "",
# "Spooky Chandelier": "",
# "Glass Chandelier": "",
# "Cactus Bed": "",
# "Flesh Bed": "",
# "Frozen Bed": "",
# "Lihzahrd Bed": "",
# "Skyware Bed": "",
# "Spooky Bed": "",
# "Cactus Bathtub": "",
# "Ebonwood Bathtub": "",
# "Flesh Bathtub": "",
# "Glass Bathtub": "",
# "Frozen Bathtub": "",
# "<NAME> Bathtub": "",
# "Pearlwood Bathtub": "",
# "Lihzahrd Bathtub": "",
# "Skyware Bathtub": "",
# "Spooky Bathtub": "",
# "Cactus Lamp": "",
# "Ebonwood Lamp": "",
# "Flesh Lamp": "",
# "Glass Lamp": "",
# "Frozen Lamp": "",
# "Rich Mahogany Lamp": "",
# "Pearlwood Lamp": "",
# "Lihzahrd Lamp": "",
# "Skyware Lamp": "",
# "Spooky Lamp": "",
# "Cactus Candelabra": "",
# "Ebonwood Candelabra": "",
# "Flesh Candelabra": "",
# "Honey Candelabra": "",
# "Steampunk Candelabra": "",
# "Glass Candelabra": "",
# "Rich Mahogany Candelabra": "",
# "Pearlwood Candelabra": "",
# "Frozen Candelabra": "",
# "Lihzahrd Candelabra": "",
# "Skyware Candelabra": "",
# "Spooky Candelabra": "",
# "Brain of Cthulhu Mask": "",
# "Wall of Flesh Mask": "",
# "Twin Mask": "",
# "Skeletron Prime Mask": "",
# "Plantera Mask": "",
# "Golem Mask": "",
# "Eater of Worlds Mask": "",
# "Eye of Cthulhu Mask": "",
# "Destroyer Mask": "",
# "Blacksmith Rack": "",
# "Carpentry Rack": "",
# "Helmet Rack": "",
# "Spear Rack": "",
# "Sword Rack": "",
# "Stone Slab": "",
# "Sandstone Slab": "",
# "Frog": "",
# "Mallard Duck": "",
# "Duck": "",
# "Honey Bathtub": "",
# "Steampunk Bathtub": "",
# "Living Wood Bathtub": "",
# "Shadewood Bathtub": "",
# "Bone Bathtub": "",
# "Honey Lamp": "",
# "Steampunk Lamp": "",
# "Living Wood Lamp": "",
# "Shadewood Lamp": "",
# "Golden Lamp": "",
# "Bone Lamp": "",
# "Living Wood Bookcase": "",
# "Shadewood Bookcase": "",
# "Golden Bookcase": "",
# "Bone Bookcase": "",
# "Living Wood Bed": "",
# "Bone Bed": "",
# "Living Wood Chandelier": "",
# "Shadewood Chandelier": "",
# "Golden Chandelier": "",
# "Bone Chandelier": "",
# "Living Wood Lantern": "",
# "Shadewood Lantern": "",
# "Golden Lantern": "",
# "Bone Lantern": "",
# "Living Wood Candelabra": "",
# "Shadewood Candelabra": "",
# "Golden Candelabra": "",
# "Bone Candelabra": "",
# "Living Wood Candle": "",
# "Shadewood Candle": "",
# "Golden Candle": "",
# "Black Scorpion": "",
# "Scorpion": "",
# "Bubble Wallpaper": "",
# "Copper Pipe Wallpaper": "",
# "Ducky Wallpaper": "",
# "Frost Core": "",
# "Bunny Cage": "",
# "Squirrel Cage": "",
# "Mallard Duck Cage": "",
# "Duck Cage": "",
# "Bird Cage": "",
# "Blue Jay Cage": "",
# "Cardinal Cage": "",
# "Waterfall Wall": "",
# "Lavafall Wall": "",
# "Crimson Seeds": "",
# "Heavy Work Bench": "",
# "Copper Plating": "",
# "Snail Cage": "",
# "Glowing Snail Cage": "",
# "Shroomite Digging Claw": "",
# "Ammo Box": "",
# "Monarch Butterfly Jar": "",
# "Purple Emperor Butterfly Jar": "",
# "Red Admiral Butterfly Jar": "",
# "Ulysses Butterfly Jar": "",
# "Sulphur Butterfly Jar": "",
# "Tree Nymph Butterfly Jar": "",
# "Zebra Swallowtail Butterfly Jar": "",
# "Julia Butterfly Jar": "",
# "Scorpion Cage": "",
# "Black Scorpion Cage": "",
# "Venom Staff": "",
# "Spectre Mask": "",
# "Frog Cage": "",
# "Mouse Cage": "",
# "Bone Welder": "",
# "Flesh Cloning Vat": "",
# "Glass Kiln": "",
# "Lihzahrd Furnace": "",
# "Living Loom": "",
# "Sky Mill": "",
# "Ice Machine": "",
# "Beetle Helmet": "",
# "Beetle Scale Mail": "",
# "Beetle Shell": "",
# "Beetle Leggings": "",
# "Steampunk Boiler": "",
# "Honey Dispenser": "",
# "Penguin": "",
# "Penguin Cage": "",
# "Worm Cage": "",
# "Terrarium": "",
# "Super Mana Potion": "",
# "Ebonwood Fence": "",
# "Rich Mahogany Fence": "",
# "Pearlwood Fence": "",
# "Shadewood Fence": "",
# "Brick Layer": "",
# "Extendo Grip": "",
# "Paint Sprayer": "",
# "Portable Cement Mixer": "",
# "Beetle Husk": "",
# "Celestial Magnet": "",
# "Celestial Emblem": "",
# "Celestial Cuffs": "",
# "Peddler's Hat": "",
# "Pulse Bow": "",
# "Large Dynasty Lantern": "",
# "Dynasty Lamp": "",
# "Dynasty Lantern": "",
# "Large Dynasty Candle": "",
# "Dynasty Chair": "",
# "Dynasty Work Bench": "",
# "Dynasty Chest": "",
# "Dynasty Bed": "",
# "Dynasty Bathtub": "",
# "Dynasty Bookcase": "",
# "Dynasty Cup": "",
# "Dynasty Bowl": "",
# "Dynasty Candle": "",
# "Dynasty Clock": "",
# "Golden Clock": "",
# "Glass Clock": "",
# "Honey Clock": "",
# "Steampunk Clock": "",
# "Fancy Dishes": "",
# "Glass Bowl": "",
# "Wine Glass": "",
# "Living Wood Piano": "",
# "Flesh Piano": "",
# "Frozen Piano": "",
# "Frozen Table": "",
# "Honey Chest": "",
# "Steampunk Chest": "",
# "Honey Work Bench": "",
# "Frozen Work Bench": "",
# "Steampunk Work Bench": "",
# "Glass Piano": "",
# "Honey Piano": "",
# "Steampunk Piano": "",
# "Honey Cup": "",
# "Chalice": | |
auto()
Demades = auto()
Demaratos = auto()
Demarmenos = auto()
Demas = auto()
Demeas = auto()
Demetrios = auto()
Democedes = auto()
Demodocus = auto()
Demokrates = auto()
Demoleon = auto()
Demonax = auto()
Demonous = auto()
Demophlos = auto()
Demosthenes = auto()
Deon = auto()
Derkylos = auto()
Deukalion = auto()
Dexicos = auto()
Dexios = auto()
Diactorides = auto()
Diadromes = auto()
Diadumenus = auto()
Diagoras = auto()
Dicaeus = auto()
Dieneces = auto()
Diocles = auto()
Diodoros = auto()
Diodorus = auto()
Diokles = auto()
Diomedes = auto()
Dionysios = auto()
Dionysophanes = auto()
Dionysos = auto()
Diophantus = auto()
Diores = auto()
Dioscuros = auto()
Diotrephes = auto()
Dismas = auto()
Dithyrambos = auto()
Dmetor = auto()
Dolon = auto()
Dolops = auto()
Doreios = auto()
Doreius = auto()
Dorian = auto()
Doriskos = auto()
Doros = auto()
Dorotheus = auto()
Doryssos = auto()
Dosithios = auto()
Drimylos = auto()
Dromeus = auto()
Dryas = auto()
Dryops = auto()
Ducetius = auto()
Duris = auto()
Dymas = auto()
Dymnos = auto()
Echekrates = auto()
Echelaos = auto()
Echemmon = auto()
Echemus = auto()
Echephron = auto()
Echepolus = auto()
Echestratos = auto()
Eetion = auto()
Eioneus = auto()
Eirenaios = auto()
Elasus = auto()
Elatos = auto()
Elatreus = auto()
Eleon = auto()
Elephenor = auto()
Elpenor = auto()
Elpides = auto()
Elpidius = auto()
Empedocles = auto()
Endios = auto()
Endymion = auto()
Engenes = auto()
Eniopus = auto()
Ennaeus = auto()
Ennomus = auto()
Ennychus = auto()
Enops = auto()
Eos = auto()
Epaenetus = auto()
Epaphos = auto()
Epaphroditus = auto()
Epeigeus = auto()
Epeius = auto()
Ephialtes = auto()
Epicurus = auto()
Epicydes = auto()
Epikrates = auto()
Epimenes = auto()
Epiphanes = auto()
Epistor = auto()
Epistrophos = auto()
Epitrophos = auto()
Epizelos = auto()
Erasistratus = auto()
Eratosthenes = auto()
Eratostheres = auto()
Erechtheus = auto()
Eretmenus = auto()
Ereuthalion = auto()
Erginus = auto()
Ergiyios = auto()
Erichthonius = auto()
Erxandros = auto()
Eryalus = auto()
Erysichton = auto()
Eryx = auto()
Eryximachos = auto()
Eteocles = auto()
Eteokles = auto()
Eteonous = auto()
Euaemon = auto()
Eualcidas = auto()
Euanthes = auto()
Euarestos = auto()
Eubalus = auto()
Eubulus = auto()
Eucarpus = auto()
Euchenor = auto()
Eucleides = auto()
Eudorus = auto()
Eudoxsus = auto()
Eudoxus = auto()
Euenius = auto()
Euenor = auto()
Euenus = auto()
Eugammon = auto()
Eugenios = auto()
Eugenius = auto()
Euhemenis = auto()
Euippus = auto()
Eukles = auto()
Eumaeus = auto()
Eumastas = auto()
Eumelus = auto()
Eumenes = auto()
Eumneus = auto()
Eumolpus = auto()
Euneas = auto()
Euonomos = auto()
Eupalinus = auto()
Euphenes = auto()
Euphorbos = auto()
Euphorion = auto()
Euphronios = auto()
Eupolos = auto()
Euripides = auto()
Euryanax = auto()
Eurybates = auto()
Eurybiades = auto()
Eurycliedes = auto()
Eurydamus = auto()
Eurydemon = auto()
Eurydemos = auto()
Euryhus = auto()
Eurykrates = auto()
Eurykratides = auto()
Euryleon = auto()
Eurylochos = auto()
Eurymachos = auto()
Euryphon = auto()
Eurypylos = auto()
Eurystenes = auto()
Eurysthenes = auto()
Eurystheus = auto()
Eurysthios = auto()
Eurythion = auto()
Eurytos = auto()
Eussorus = auto()
Euthydemos = auto()
Euthynos = auto()
Eutropios = auto()
Eutuches = auto()
Eutychides = auto()
Eutychus = auto()
Evaenetos = auto()
Evagoras = auto()
Evandros = auto()
Evanetus = auto()
Evelthon = auto()
Evenios = auto()
Evenus = auto()
Evios = auto()
Exaduis = auto()
Exekias = auto()
Faenus = auto()
Galenus = auto()
Gallus = auto()
Ganymedes = auto()
Gauanes = auto()
Geleon = auto()
Gelo = auto()
Gelon = auto()
Gennadios = auto()
Gerasimos = auto()
Giorgius = auto()
Glaukias = auto()
Glaukos = auto()
Glycon = auto()
Gnipho = auto()
Gordias = auto()
Gorgias = auto()
Gorgion = auto()
Gorgos = auto()
Gorgythion = auto()
Gregorius = auto()
Gryllus = auto()
Gurgos = auto()
Gylippos = auto()
Gyras = auto()
Gyrtias = auto()
Haemon = auto()
Hagias = auto()
Hagnon = auto()
Halisthertes = auto()
Halius = auto()
Harmatidas = auto()
Harmocydes = auto()
Harmodios = auto()
Harmon = auto()
Harpagos = auto()
Harpalion = auto()
Harpalos = auto()
Harpocras = auto()
Hecataeus = auto()
Hegesandros = auto()
Hegesistratos = auto()
Hegetoridas = auto()
Heirax = auto()
Heiron = auto()
Hektor = auto()
Helenos = auto()
Helgesippos = auto()
Helicaon = auto()
Heliodorus = auto()
Helios = auto()
Helle = auto()
Hephaestos = auto()
Herakleides = auto()
Herakleitos = auto()
Heraklides = auto()
Hermeias = auto()
Hermeros = auto()
Hermippos = auto()
Hermogenes = auto()
Hermolaos = auto()
Hermolycus = auto()
Hermon = auto()
Hermotimos = auto()
Hero = auto()
Herodes = auto()
Herodianus = auto()
Herodion = auto()
Heromenes = auto()
Hicetaon = auto()
Hiero = auto()
Hieronymus = auto()
Hipparchos = auto()
Hipparinos = auto()
Hippasus = auto()
Hippias = auto()
Hippoklides = auto()
Hippokratides = auto()
Hippolytos = auto()
Hippomachos = auto()
Hippomenes = auto()
Hippon = auto()
Hipponax = auto()
Hipponicus = auto()
Hipponous = auto()
Hippotas = auto()
Hippothous = auto()
Hippotion = auto()
Hoiples = auto()
Homeros = auto()
Hyakinthos = auto()
Hylas = auto()
Hyllos = auto()
Hyllus = auto()
Hypatius = auto()
Hypeirochus = auto()
Hypenor = auto()
Hyperenor = auto()
Hyperion = auto()
Hypsenor = auto()
Hyrcanus = auto()
Hyrtacus = auto()
Hyrtius = auto()
Iakchos = auto()
Ialmenes = auto()
Iambulus = auto()
Iamus = auto()
Iasos = auto()
Iatragoras = auto()
Iatrokles = auto()
Ibanolis = auto()
Ibykos = auto()
Icarion = auto()
Icarius = auto()
Icarus = auto()
Idaeus = auto()
Idaios = auto()
Idas = auto()
Idomeneus = auto()
Ilioneus = auto()
Illyrius = auto()
Ilus = auto()
Imbrasus = auto()
Imbrius = auto()
Imbrus = auto()
Inachos = auto()
Inachus = auto()
Inaros = auto()
Iobates = auto()
Iolaos = auto()
Iollas = auto()
Ion = auto()
Iphiclus = auto()
Iphicrates = auto()
Iphikrates = auto()
Iphinous = auto()
Iphitos = auto()
Iphitus = auto()
Iros = auto()
Irus = auto()
Isagoras = auto()
Isandros = auto()
Ischenous = auto()
Isidor = auto()
Isidoros = auto()
Ision = auto()
Ismaros = auto()
Ismenios = auto()
Isocrates = auto()
Isodemos = auto()
Isokrates = auto()
Itheus = auto()
Itylus = auto()
Itys = auto()
Kadmos = auto()
Kaenas = auto()
Kaeneus = auto()
Kalchas = auto()
Kalesius = auto()
Kaletor = auto()
Kalliaros = auto()
Kallias = auto()
Kallikles = auto()
Kallikrates = auto()
Kallimachos = auto()
Kallinicus = auto()
Kallinos = auto()
Kallipides = auto()
Kallipos = auto()
Kallisthenes = auto()
Kallon = auto()
Kameirus = auto()
Kandaules = auto()
Kannadis = auto()
Kapaneus = auto()
Kapys = auto()
Karipos = auto()
Karopophores = auto()
Kasos = auto()
Kassandros = auto()
Kaunos = auto()
Kebalinos = auto()
Kebes = auto()
Kekrops = auto()
Keos = auto()
Kephalon = auto()
Kephalos = auto()
Kerameikos = auto()
Kerkyon = auto()
Keteus = auto()
Kimon = auto()
Kirphis = auto()
Kittos = auto()
Kleitos = auto()
Kleobis = auto()
Kleomenes = auto()
Koines = auto()
Koinos = auto()
Konon = auto()
Koragos = auto()
Korax = auto()
Kosmas = auto()
Krantor = auto()
Krateros = auto()
Kreon = auto()
Krinippos = auto()
Kristos = auto()
Kritias = auto()
Kritoboulos = auto()
Kritodemos = auto()
Kriton = auto()
Kroisos = auto()
Krokinos = auto()
Ktesiphon = auto()
Kyknos = auto()
Kynaegeiros = auto()
Kyrillos = auto()
Kyrios = auto()
Kyros = auto()
Labdacus = auto()
Labotas = auto()
Laertes = auto()
Lagos = auto()
Laios = auto()
Lamachos = auto()
Lampo = auto()
Lampon = auto()
Lampus = auto()
Lamus = auto()
Laodamas = auto()
Laodocus = auto()
Laogonus = auto()
Laomedon = auto()
Laphanes = auto()
Lasos | |
Spider and use other storage engines for your
default task queue.
"""
# If queue is still not configured
# then configure it with default backend
if self.taskq is None:
self.setup_queue()
def process_task_generator(self):
"""
Load new tasks from `self.task_generator_object`
and create new Task objects from them.
If the task queue holds fewer tasks than the limit,
pull another chunk of tasks from the generator.
"""
if self.task_generator_enabled:
if hasattr(self.taskq, 'qsize'):
qsize = self.taskq.qsize()
else:
qsize = self.taskq.size()
if self.max_task_generator_chunk is not None:
min_limit = min(self.max_task_generator_chunk,
self.thread_number * 10)
else:
min_limit = self.thread_number * 10
if qsize < min_limit:
logger_verbose.debug('Task queue contains fewer tasks than the limit. Trying to add new tasks')
try:
for x in xrange(min_limit - qsize):
item = next(self.task_generator_object)
logger_verbose.debug('Got new item from generator. Processing it.')
#self.add_task(item)
self.process_handler_result(item)
except StopIteration:
# If the generator has no values to yield
# then disable it
logger_verbose.debug('Task generator has no more tasks. Disabling it')
self.task_generator_enabled = False
def init_task_generator(self):
"""
Process `initial_urls` and `task_generator`.
Generate first portion of tasks.
TODO: task generator should work in separate OS process
"""
self.task_generator_object = self.task_generator()
self.task_generator_enabled = True
logger_verbose.debug('Processing initial urls')
self.load_initial_urls()
# Initial call to task generator
# before main cycle
self.process_task_generator()
def load_new_task(self):
start = time.time()
while True:
try:
with self.save_timer('task_queue'):
return self.taskq.get()
except queue.Empty:
if self.taskq.size():
logger_verbose.debug('Waiting for scheduled task')
return True
if not self.slave:
logger_verbose.debug('Task queue is empty.')
return None
else:
# Temporary hack which forces the slave crawler
# to wait 5 seconds for new tasks; this solves
# the problem that sometimes the slave crawler stops
# working because it could not receive new
# tasks immediately
if not self.transport.active_task_number():
if time.time() - start < 5:
time.sleep(0.1)
logger.debug('Slave sleeping')
else:
break
else:
break
logger_verbose.debug('Task queue is empty.')
return None
def process_task_counters(self, task):
task.network_try_count += 1
if task.task_try_count == 0:
task.task_try_count = 1
def create_grab_instance(self, **kwargs):
# Backward compatibility for deprecated `grab_config` attribute
# Use _grab_config to not trigger warning messages
if self._grab_config and kwargs:
merged_config = deepcopy(self._grab_config)
merged_config.update(kwargs)
return Grab(**merged_config)
elif self._grab_config and not kwargs:
return Grab(**self._grab_config)
else:
return Grab(**kwargs)
def setup_grab_for_task(self, task):
grab = self.create_grab_instance()
if task.grab_config:
grab.load_config(task.grab_config)
else:
grab.setup(url=task.url)
# Generate new common headers
grab.config['common_headers'] = grab.common_headers()
return grab
def is_task_cacheable(self, task, grab):
if (# cache is disabled for all tasks
not self.cache_enabled
# cache data should be refreshed
or task.get('refresh_cache', False)
# cache could not be used
or task.get('disable_cache', False)
# request type is not cacheable
or grab.detect_request_method() != 'GET'):
return False
else:
return True
def load_task_from_cache(self, transport, task, grab, grab_config_backup):
cache_item = self.cache.get_item(grab.config['url'],
timeout=task.cache_timeout)
if cache_item is None:
return None
else:
with self.save_timer('cache.read.prepare_request'):
grab.prepare_request()
with self.save_timer('cache.read.load_response'):
self.cache.load_response(grab, cache_item)
grab.log_request('CACHED')
self.inc_count('request')
self.inc_count('request-cache')
return {'ok': True, 'grab': grab,
'grab_config_backup': grab_config_backup,
'task': task, 'emsg': None}
def valid_response_code(self, code, task):
"""
Answer the question: can the response be handled by the usual
task handler, or did the task fail and should it be processed as an error?
"""
return (code < 400 or code == 404 or
code in task.valid_status)
def process_handler_error(self, func_name, ex, task, error_tb=None):
self.inc_count('error-%s' % ex.__class__.__name__.lower())
if error_tb is not None:
logger.error('Error in %s function' % func_name)
logger.error(error_tb)
else:
logger.error('Error in %s function' % func_name,
exc_info=ex)
# Looks strange but I really have some problems with
# serializing the exception into a string
try:
ex_str = unicode(ex)
except TypeError:
try:
ex_str = unicode(ex, 'utf-8', 'ignore')
except TypeError:
ex_str = str(ex)
task_url = task.url if task is not None else None
self.add_item('fatal', '%s|%s|%s|%s' % (
func_name, ex.__class__.__name__, ex_str, task_url))
if isinstance(ex, FatalError):
raise
def find_data_handler(self, data):
try:
return getattr(data, 'handler')
except AttributeError:
try:
handler = getattr(self, 'data_%s' % data.handler_key)
except AttributeError:
raise NoDataHandler('No handler defined for Data %s' % data.handler_key)
else:
return handler
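# Note on the convention above: a Data object is dispatched either through
# its own `handler` attribute or through a spider method named
# `data_<handler_key>`; e.g. a Data item whose handler_key is 'image' would
# be routed to `self.data_image` (the key name here is only illustrative).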
def execute_task_handler(self, res, handler):
"""
Apply `handler` function to the network result.
If network result is failed then submit task again
to the network task queue.
"""
try:
handler_name = handler.__name__
except AttributeError:
handler_name = 'NONE'
if (res['task'].get('raw') or (
res['ok'] and self.valid_response_code(res['grab'].response.code, res['task']))):
try:
with self.save_timer('response_handler'):
with self.save_timer('response_handler.%s' % handler_name):
result = handler(res['grab'], res['task'])
if result is None:
pass
else:
for item in result:
self.process_handler_result(item, res['task'])
except NoDataHandler as ex:
raise
except Exception as ex:
self.process_handler_error(handler_name, ex, res['task'])
else:
self.inc_count('task-%s-ok' % res['task'].name)
else:
# Log the error
if res['ok']:
msg = res['emsg'] = 'HTTP %s' % res['grab'].response.code
else:
msg = res['emsg']
# TODO: REMOVE
#if 'Operation timed out after' in msg:
#num = int(msg.split('Operation timed out after')[1].strip().split(' ')[0])
#if num > 20000:
#import pdb; pdb.set_trace()
self.inc_count('network-error-%s' % res['emsg'][:20])
logger.error(u'Network error: %s' % msg)
# Try to repeat the same network query
if self.network_try_limit > 0:
task = res['task']
task.refresh_cache = True
# Should use task.grab_config or backup of grab_config
task.setup_grab_config(res['grab_config_backup'])
self.add_task(task)
# TODO: allow to write error handlers
def find_task_handler(self, task):
callback = task.get('callback')
if callback:
return callback
else:
try:
handler = getattr(self, 'task_%s' % task.name)
except AttributeError:
raise NoTaskHandler('No handler or callback defined for task %s' % task.name)
else:
return handler
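# Usage sketch (illustrative, not part of this module): handlers are looked
# up as `task_<task.name>` methods unless the Task carries an explicit
# `callback`, so a hypothetical spider subclass might define:
#
#     class ExampleSpider(Spider):
#         def task_page(self, grab, task):
#             # invoked once the network result for a Task named 'page'
#             # is available
#             ...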
def process_network_result(self, res, from_cache=False):
"""
Handle a result received from the network transport or
from the cache layer.
Find handler function for that task and call it.
"""
# Increase stat counters
self.inc_count('request-processed')
self.inc_count('task')
self.inc_count('task-%s' % res['task'].name)
if (res['task'].network_try_count == 1 and
res['task'].task_try_count == 1):
self.inc_count('task-%s-initial' % res['task'].name)
# Update traffic statistics
if res['grab'] and res['grab'].response:
self.timers['network-name-lookup'] += res['grab'].response.name_lookup_time
self.timers['network-connect'] += res['grab'].response.connect_time
self.timers['network-total'] += res['grab'].response.total_time
if not from_cache:
self.inc_count('download-size', res['grab'].response.download_size)
self.inc_count('upload-size', res['grab'].response.upload_size)
self.inc_count('download-size-with-cache', res['grab'].response.download_size)
self.inc_count('upload-size-with-cache', res['grab'].response.upload_size)
#self.inc_count('traffic-in
# NG
# FIX: Understand how it should work in NG spider
# TOFIX: start
stop = False
for mid in self.middleware_points['response']:
try:
mid_response = mid.process_response(self, res)
except StopTaskProcessing:
logger.debug('Got StopTaskProcessing exception')
stop = True
break
else:
if isinstance(mid_response, Task):
logger.debug('Got task from middleware')
self.add_task(mid_response)
stop = True
break
elif mid_response is None:
pass
else:
raise Exception('Unknown response from middleware %s' % mid)
# TOFIX: end
if stop:
return
if self.ng:
logger_verbose.debug('Submitting result for task %s to response queue' % res['task'])
self.network_response_queue.put(res)
else:
handler = self.find_task_handler(res['task'])
self.execute_task_handler(res, handler)
def change_proxy(self, task, grab):
"""
Assign new proxy from proxylist to the task.
"""
if task.use_proxylist and self.proxylist_enabled:
if self.proxy_auto_change:
self.proxy = self.proxylist.get_random()
if self.proxy:
proxy, proxy_userpwd, proxy_type = self.proxy
grab.setup(proxy=proxy, proxy_userpwd=proxy_userpwd,
proxy_type=proxy_type)
def process_new_task(self, task):
"""
Handle new task.
1) Setup Grab object for that task
2) Try to load task from the cache
3) If no cached data then submit task to network transport
"""
grab = self.setup_grab_for_task(task)
grab_config_backup = grab.dump_config()
cache_result = None
if self.is_task_cacheable(task, grab):
with self.save_timer('cache'):
with self.save_timer('cache.read'):
cache_result = self.load_task_from_cache(
self.transport, task, grab, grab_config_backup)
if cache_result:
logger_verbose.debug('Task data is loaded from the cache. Yielding task result.')
self.process_network_result(cache_result, from_cache=True)
self.inc_count('task-%s-cache' % task.name)
else:
if self.only_cache:
logger.debug('Skipping network request to %s' % grab.config['url'])
else:
self.inc_count('request-network')
self.inc_count('task-%s-network' % task.name)
self.change_proxy(task, grab)
with self.save_timer('network_transport'):
logger_verbose.debug('Submitting task to the transport layer')
try:
self.transport.process_task(task, grab, grab_config_backup)
except GrabInvalidUrl as ex:
logger.debug('Task %s has invalid URL: %s' % (
task.name, task.url))
self.add_item('invalid-url', task.url)
else:
logger_verbose.debug('Asking transport layer to do something')
def is_valid_for_cache(self, res):
"""
Check if network transport result could
be saved to cache layer.
res: {ok, grab, grab_config_backup, task, emsg}
"""
if res['ok']:
if self.cache_enabled:
if res['grab'].request_method == 'GET':
if not res['task'].get('disable_cache'):
if self.valid_response_code(res['grab'].response.code, res['task']):
return True
return False
def stop(self):
"""
This method sets an internal flag which signals the spider
to stop processing new tasks and shut down.
"""
logger_verbose.debug('Method `stop` was called')
self.work_allowed = False
def run(self):
"""
Main method. All work is done here.
"""
self.start_timer('total')
self.transport = MulticurlTransport(self.thread_number)
try:
self.setup_default_queue()
self.prepare()
self.start_timer('task_generator')
if not self.slave:
if not self.ng:
self.init_task_generator()
self.stop_timer('task_generator')
while self.work_allowed:
now = int(time.time())
if now - self.last_snapshot_values['timestamp'] > self.snapshot_interval:
snapshot = {'timestamp': now}
for key in ('download-size', 'upload-size',
'download-size-with-cache'):
snapshot[key] = self.counters[key] - self.last_snapshot_values[key]
self.last_snapshot_values[key] = self.counters[key]
snapshot['request-count'] = self.counters['request'] -\
self.last_snapshot_values['request-count']
self.last_snapshot_values['request-count'] = self.counters['request']
self.last_snapshot_values['timestamp'] = now
self.snapshots[now] = snapshot
self.snapshot_timestamps.append(now)
if self.snapshot_file:
with open(self.snapshot_file, 'a') as out:
out.write(json.dumps(snapshot) + '\n')
# FIXIT: REMOVE
# Run update task handler which
# updates database | |
"""The graphical representation of devices as nodes"""
import sys, time, os
if sys.version_info < (2, 6):
print "importing newer subprocess module from python 2.6"
from Core import subprocess
else:
import subprocess
from PyQt4 import QtCore, QtGui
from Core.globals import options, environ, mainWidgets, defaultOptions, availableyRouters, usedyRouters
from Properties import *
from Core.Item import *
# from StatsWindow import *
class DropItem(QtGui.QGraphicsItem):
def __init__(self, itemType=None):
"""
Create a draggable item, which can be dropped into the canvas.
"""
QtGui.QGraphicsItem.__init__(self)
if itemType:
self.device_type = itemType
self.image = QtGui.QImage(environ["images"] + self.device_type + ".gif")
if self.image.isNull():
mainWidgets["log"].append("Unknown node type " + str(self.device_type))
return
#matrix = QtGui.QMatrix(40.0/image.width(),0,0,40.0/image.height(),0,0)
#self.image = image.transformed(matrix, QtCore.Qt.SmoothTransformation)
self.setCursor(QtCore.Qt.OpenHandCursor)
if itemType in unimplementedTypes:
self.setToolTip(self.device_type.center(13) + "\nImplement me.")
self.setEnabled(False)
else:
self.setToolTip(self.device_type.center(21) + "\nDrag onto the canvas.")
def paint(self, painter, option, widget):
"""
Draw the representation.
"""
painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options["smoothing"])
if not self.isEnabled():
transparency = QtGui.QImage(self.image)
transparency.fill(QtGui.qRgba(0,0,0,50))
painter.drawImage(QtCore.QPoint(-self.image.width()/2, -self.image.height()/2), transparency)
painter.drawImage(QtCore.QPoint(-self.image.width()/2, -self.image.height()/2), self.image)
device_text = self.device_type
if self.device_type == "yRouter":
device_text += " (" + str(len(availableyRouters)) + ")"
painter.drawText(QtCore.QRectF(-70, self.image.height()/2, 145, 60), device_text, QtGui.QTextOption(QtCore.Qt.AlignHCenter))
def boundingRect(self):
"""
Get the bounding rectangle of the item.
"""
rect = self.image.rect()
toR = QtCore.QRectF(rect.left() - rect.width()/2, rect.top() - rect.height()/2, rect.width(), rect.height())
return toR
def mousePressEvent(self, event):
"""
Handle the mouse events on this item.
"""
if event.button() != QtCore.Qt.LeftButton:
event.ignore()
return
drag = QtGui.QDrag(event.widget())
mime = QtCore.QMimeData()
mime.setText(self.device_type)
drag.setMimeData(mime)
drag.setPixmap(QtGui.QPixmap.fromImage(self.image))
drag.setHotSpot(QtCore.QPoint(15, 30))
drag.start()
class Node(DropItem, Item):
def __init__(self, itemType = None):
"""
Create a draggable item for the main scene to represent devices.
"""
self.edgeList = []
DropItem.__init__(self, itemType)
itemTypes = nodeTypes[self.device_type]
index = self.findNextIndex(itemTypes[self.device_type])
if index == 0:
if self.device_type == "yRouter":
popup = mainWidgets["popup"]
popup.setWindowTitle("Cannot add yRouter")
popup.setText("There are no yRouters available to add to the topology!")
popup.show()
return
print "Node.__init__: I have raised an exception."
raise Exception
if self.device_type == "yRouter":
yRouter = availableyRouters.pop(0)
usedyRouters[index] = yRouter
if mainWidgets["drop"].commonDropArea.yRouterDrop is not None:
mainWidgets["drop"].commonDropArea.yRouterDrop.update()
if mainWidgets["drop"].netDropArea.yRouterDrop is not None:
mainWidgets["drop"].netDropArea.yRouterDrop.update()
name = self.device_type + "_%d" % index
self.properties = {}
self.setProperty("Name", name)
self.setProperty("name", name)
self.interfaces = []
self.newPos = QtCore.QPointF()
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.setZValue(1)
self.setToolTip(name)
self.shell = None
self.status = None
self.inc = 4
self.color = QtGui.QColor(0,0,0)
# Context Menu
self.menu = QtGui.QMenu()
self.menu.setPalette(defaultOptions["palette"])
self.menu.addAction("Delete", self.delete)
# if self.device_type == "Mobile":
# self.wstatsWindow = StatsWindow(self.getName(), mainWidgets["canvas"])
# self.setAcceptsHoverEvents(True)
# elif self.device_type == "Router":
# self.tail = None
# self.wshark = None
# self.rstatsWindow = None
scene = mainWidgets["canvas"].scene()
QtCore.QObject.connect(scene.getTimer(), QtCore.SIGNAL("timeout()"), self.updateColor)
def findNextIndex(self, index):
"""
Find the next index for the node's type.
"""
itemTypes = nodeTypes[self.device_type]
if self.device_type == "yRouter":
if not availableyRouters:
return 0
newIndex = availableyRouters[0]['ID']
itemTypes[self.device_type] = len(usedyRouters) + 1
else:
firstPass = True
newIndex = index + 1
if newIndex > 126:
newIndex = 1
firstPass = False
scene = mainWidgets["canvas"].scene()
while scene.findItem(self.device_type + "_%d" % newIndex) or newIndex == index:
newIndex += 1
if newIndex > 126:
if not firstPass:
return 0
newIndex = 1
firstPass = False
itemTypes[self.device_type] = newIndex
return newIndex
def setIndex(self, index):
"""
Set the index of the node.
"""
itemTypes = nodeTypes[self.device_type]
if index > itemTypes[self.device_type]:
itemTypes[self.device_type] = index
name = self.device_type + "_%d" % index
self.setProperty("Name", name)
self.setProperty("name", name)
self.setToolTip(name)
def setStatus(self, status):
"""
Set the status of the node.
"""
if self.status == status:
return
if not self.status and self.device_type == "Wireless_access_point":
client = mainWidgets["client"]
if client:
client.send("attachdetach %d" % self.getID())
self.status = status
if not status:
self.scene().stopRefresh()
elif status == "attached":
self.color = QtGui.QColor(0,255,0)
elif status == "detached":
self.color = QtGui.QColor(255,255,0)
else:
self.color = QtGui.QColor(255,0,0)
if not options["glowingLights"]:
self.scene().update()
def setWirelessStats(self, stats):
"""
Update the wireless stats of the node.
"""
self.wstatsWindow.updateStats(stats)
def setRouterStats(self, queue, size, rate):
"""
Update the router stats of the node.
"""
self.rstatsWindow.updateStats(queue, size, rate)
def updateColor(self):
"""
Update the color of the glowing light.
"""
if not options["glowingLights"] or not self.status:
return
currentGreen = self.color.green()
if currentGreen == 255:
self.inc = -4
elif currentGreen == 127:
self.inc = 4
currentRed = self.color.red()
if currentRed == 255:
self.inc = -4
elif currentRed == 127:
self.inc = 4
if self.status == "attached":
self.color.setGreen(self.color.green() + self.inc)
elif self.status == "detached":
self.color.setGreen(self.color.green() + self.inc)
self.color.setRed(self.color.red() + self.inc)
else:
self.color.setRed(self.color.red() + self.inc)
def paint(self, painter, option, widget):
"""
Draw the representation and its name.
"""
painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options["smoothing"])
painter.drawImage(QtCore.QPoint(-self.image.width()/2, -self.image.height()/2), self.image)
if options["names"]:
painter.drawText(QtCore.QRectF(-90, self.image.height()/2, 180, 60), self.getProperty("name"), QtGui.QTextOption(QtCore.Qt.AlignHCenter))
if self.status:
painter.setBrush(self.color)
painter.drawEllipse(self.lightPoint, 5, 5)
painter.setBrush(QtCore.Qt.NoBrush)
if self.isSelected():
painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DashLine))
painter.drawRect(self.boundingRect())
def addEdge(self, edge):
"""
Add a connection with another node.
"""
self.edgeList.append(edge)
edge.adjust()
def removeEdge(self, edge):
"""
Remove a connection with another node.
"""
self.edgeList.remove(edge)
def edges(self):
"""
Return all of the node's current connections.
"""
return self.edgeList[:] # this notation gets a slice of the entire list, effectively cloning it
def calculateForces(self):
"""
Calculate the forces to determine movement of the arrange action.
"""
if not self.scene() or self.scene().mouseGrabberItem() is self:
self.newPos = self.pos()
return
# Sum up all forces pushing this item away.
xvel = 0.0
yvel = 0.0
for item in self.scene().items():
if not isinstance(item, Node):
continue
line = QtCore.QLineF(self.mapFromItem(item, 0, 0), QtCore.QPointF(0, 0))
dx = line.dx()
dy = line.dy()
l = 2.0 * (dx * dx + dy * dy)
if l > 0:
xvel += (dx * 150.0) / l
yvel += (dy * 150.0) / l
# Now subtract all forces pulling items together.
weight = (len(self.edgeList) + 1) * 50.0
for edge in self.edgeList:
if edge.sourceNode() is self:
pos = self.mapFromItem(edge.destNode(), 0, 0)
else:
pos = self.mapFromItem(edge.sourceNode(), 0, 0)
xvel += pos.x() / weight
yvel += pos.y() / weight
if QtCore.qAbs(xvel) < 0.1 and QtCore.qAbs(yvel) < 0.1:
xvel = yvel = 0.0
sceneRect = self.scene().sceneRect()
self.newPos = self.pos() + QtCore.QPointF(xvel, yvel)
self.newPos.setX(min(max(self.newPos.x(), sceneRect.left() + 10), sceneRect.right() - 10))
self.newPos.setY(min(max(self.newPos.y(), sceneRect.top() + 10), sceneRect.bottom() - 10))
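# In short, calculateForces performs one step of a simple spring-embedder
# layout: every pair of nodes repels with a force that decays with distance,
# each edge pulls its endpoints together with a strength inversely
# proportional to the node's degree-based weight, and the resulting position
# is clamped to the scene rectangle.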
def advance(self):
"""
Determine if nodes are to be advanced.
"""
if self.newPos == self.pos():
return False
self.setPos(self.newPos)
return True
def setPos(self, *args):
super(Node, self).setPos(*args)
for edge in self.edgeList:
edge.adjust()
def shape(self):
"""
Get the shape of the node.
"""
path = QtGui.QPainterPath()
path.addEllipse(self.boundingRect())
return path
def itemChange(self, change, value):
"""
Handle movement of the node.
"""
if True: #change == QtGui.QGraphicsItem.ItemPositionChange:
for edge in self.edgeList:
edge.adjust()
if change == QtGui.QGraphicsItem.ItemPositionChange:
mainWidgets["canvas"].itemMoved()
if self.device_type == "Mobile":
self.moveStats()
return QtGui.QGraphicsItem.itemChange(self, change, value)
def mousePressEvent(self, event):
"""
Handle mouse press events on the node.
"""
self.update()
if event.button() == QtCore.Qt.RightButton:
mainWidgets["canvas"].connectNode(self)
else:
QtGui.QGraphicsItem.mousePressEvent(self, event)
def update(self):
super(Node, self).update()
for edge in self.edgeList:
edge.adjust()
def mouseMoveEvent(self, event):
"""
Handle mouse move events on the node.
"""
self.update()
if mainWidgets["main"].isRunning() and \
self.device_type != "Mobile" and \
event.buttons() == QtCore.Qt.LeftButton:
if options["moveAlert"]:
popup = mainWidgets["popup"]
popup.setWindowTitle("Moving Disabled")
popup.setText("Cannot move devices other than Mobile in a running topology!")
popup.show()
return
QtGui.QGraphicsItem.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
"""
Handle mouse release events on the node.
"""
self.update()
QtGui.QGraphicsItem.mouseReleaseEvent(self, event)
if mainWidgets["main"].isRunning() and \
self.device_type == "Mobile" and \
event.button() == QtCore.Qt.LeftButton:
edges = self.edges()
if not edges:
return
wap = edges[0].getOtherDevice(self)
relpos = wap.pos()
pos = self.pos()
x = (pos.x() - relpos.x()) / 4
y = (pos.y() - relpos.y()) / 4
# propagate the change to the real wireless_access_point
client = mainWidgets["client"]
if client:
client.send("screen -S WAP_%d -X eval 'stuff \"mov set node %d location %d %d 0\"\\015'" \
% (wap.getID(), self.getID(), x, y))
elif not mainWidgets["main"].isRunning():
self.itemChange(0, 0)
def attach(self):
"""
Attach to corresponding device on backend.
"""
return
def mouseDoubleClickEvent(self, event):
"""
Handle mouse double click events on the node.
"""
if mainWidgets["main"].isRunning():
self.attach()
def nudge(self):
"""
Nudge the node to trigger a sequence of movements.
"""
pos = self.pos()
self.newPos.setX(pos.x() + 1)
self.newPos.setY(pos.y() + 1)
self.advance()
self.newPos.setX(pos.x())
self.newPos.setY(pos.y())
self.advance()
def delete(self):
"""
Delete the node and its edges.
"""
if mainWidgets["main"].isRunning():
mainWidgets["log"].append("You cannot delete items from a running topology!")
return
from Tutorial import Tutorial
if isinstance(mainWidgets["canvas"], Tutorial):
mainWidgets["log"].append("You cannot delete items from the tutorial!")
return
for edge in self.edges():
edge.delete()
if self.device_type == "yRouter":
index = self.getID()
yRouter = usedyRouters[index]
availableyRouters.append(yRouter)
availableyRouters.sort(key=lambda YunEntity: YunEntity['ID'])
del usedyRouters[index]
if mainWidgets["drop"].commonDropArea.yRouterDrop is not None:
mainWidgets["drop"].commonDropArea.yRouterDrop.update()
if mainWidgets["drop"].netDropArea.yRouterDrop is not None:
mainWidgets["drop"].netDropArea.yRouterDrop.update()
self.scene().removeItem(self)
def restart(self):
"""
Restart the | |
"""
summary_data = []
#
totalcounted = 0
counted_dict = {}
size_range_dict = {} # Value for sizeclasses aggregated.
locked_list = []
#
for sampleobject in self._sample_rows.values():
# Check method step.
if method_step:
if not method_step == sampleobject.get_method_step():
continue
# Count on scientific name. Standard alternative.
taxon = sampleobject.get_scientific_full_name()
sort_order = taxon
size = sampleobject.get_size_class()
# Use the same key for locked items.
if sampleobject.is_locked():
if size:
locked_list.append(taxon + ' [' + size + '] ')
else:
locked_list.append(taxon)
# Count on class name.
if summary_type == 'Counted per classes':
taxon = plankton_core.Species().get_taxon_value(taxon, 'taxon_class')
sort_order = taxon
if len(taxon) == 0:
taxon = '<class unknown>'
sort_order = taxon
# Count on scientific name and size class.
elif summary_type == 'Counted per taxa':
if size:
taxon = taxon
sort_order = taxon
elif summary_type == 'Counted per taxa/sizes':
if size:
size_for_sorting = f"{taxon} [{float(size):5.0f}]"
taxon = taxon + ' [' + size + '] '
sort_order = size_for_sorting
# Create in list, first time only.
if taxon not in counted_dict:
counted_dict[taxon] = {}
counted_dict[taxon]["counted_units"] = 0
counted_dict[taxon]["as_text"] = '0'
counted_dict[taxon]["sort_order"] = sort_order
# Add.
try:
abundance_class = sampleobject.get_abundance_class()
if abundance_class in ['', 0]:
# Quantitative.
counted_dict[taxon]["counted_units"] += int(sampleobject.get_counted_units())
counted_dict[taxon]["as_text"] = str(counted_dict[taxon]["counted_units"])
totalcounted += int(sampleobject.get_counted_units())
counted_units_list = sampleobject.get_counted_units_list()
if ';' in counted_units_list:
last_transect_units = counted_units_list.split(';')[-1]
counted_dict[taxon]["as_text"] = str(counted_dict[taxon]["counted_units"]) + '/' + last_transect_units
else:
# Qualitative.
if summary_type in ['Counted per taxa', 'Counted per taxa/sizes']:
# if counted_dict[taxon] == 0:
if counted_dict[taxon]["counted_units"] == 0:
if abundance_class == '1':
counted_dict[taxon]["counted_units"] = 1
counted_dict[taxon]["as_text"] = '1=Observed'
elif abundance_class == '2':
counted_dict[taxon]["counted_units"] = 2
counted_dict[taxon]["as_text"] = '2=Several cells'
elif abundance_class == '3':
counted_dict[taxon]["counted_units"] = 3
counted_dict[taxon]["as_text"] = '3=1-10%'
elif abundance_class == '4':
counted_dict[taxon]["counted_units"] = 4
counted_dict[taxon]["as_text"] = '4=10-50%'
elif abundance_class == '5':
counted_dict[taxon]["counted_units"] = 5
counted_dict[taxon]["as_text"] = '5=50-100%'
else:
counted_dict[taxon]["counted_units"] = 1
counted_dict[taxon]["as_text"] = '<Qualitative>'
else:
counted_dict[taxon]["as_text"] = '<Qualitative>'
except:
pass # If value = ''.
#
if summary_type in ['Counted per taxa/sizes']:
bvol_size_range = plankton_core.Species().get_bvol_dict(sampleobject.get_scientific_name(), size).get('bvol_size_range', '')
if bvol_size_range:
size_range_dict[taxon] = '(Size: ' + bvol_size_range + ')'
#
summary_data.append('Total counted: ' + str(totalcounted))
summary_data.append('')
if most_counted_sorting == False:
# Alphabetical.
for key, _value in sorted(counted_dict.items(), key=lambda x:x[1]["sort_order"]):
size_range = ''
if key in size_range_dict:
size_range = ' ' + size_range_dict[key]
lock_info = ''
if key in locked_list:
lock_info = ' [Locked]'
summary_data.append(key + ': ' + str(counted_dict[key]["as_text"]) + lock_info + size_range)
else:
# Sort for most counted.
for key, _value in sorted(counted_dict.items(), key=lambda x:x[1]["counted_units"], reverse=True):
size_range = ''
if key in size_range_dict:
size_range = ' ' + size_range_dict[key]
lock_info = ''
if key in locked_list:
lock_info = ' [Locked]'
summary_data.append(key + ': ' + str(counted_dict[key]["as_text"]) + lock_info + size_range)
#
return summary_data
def get_locked_taxa(self, method_step = None):
""" """
species_locked_list = []
#
for sampleobject in self._sample_rows.values():
# Check method step.
if method_step:
if not method_step == sampleobject.get_method_step():
continue
#
taxon = sampleobject.get_scientific_full_name()
size = sampleobject.get_size_class()
#
if sampleobject.is_locked():
species_locked_list.append([taxon, size, True])
else:
species_locked_list.append([taxon, size, False])
#
return species_locked_list
def lock_taxa(self, scientific_full_name, size_class, locked_at_count_area):
""" """
search_dict = {}
search_dict['scientific_full_name'] = scientific_full_name
search_dict['size_class'] = size_class
samplerowkey = SampleRow(search_dict).get_key()
if samplerowkey in self._sample_rows:
if not self._sample_rows[samplerowkey].is_locked():
self._sample_rows[samplerowkey].set_lock(locked_at_count_area)
def unlock_taxa(self, scientific_full_name, size_class, count_area_number):
""" """
search_dict = {}
search_dict['scientific_full_name'] = scientific_full_name
search_dict['size_class'] = size_class
samplerowkey = SampleRow(search_dict).get_key()
if samplerowkey in self._sample_rows:
self._sample_rows[samplerowkey].set_lock('')
#
self._sample_rows[samplerowkey].set_count_area_number(count_area_number)
def get_sample_row_dict(self, counted_row_dict):
""" """
samplerowkey = SampleRow(counted_row_dict).get_key()
if samplerowkey in self._sample_rows:
return self._sample_rows[samplerowkey].get_sample_row_dict()
#
return {}
def update_sample_row(self, counted_row_dict):
""" """
if len(counted_row_dict.get('scientific_name', '')) > 0:
samplerowkey = SampleRow(counted_row_dict).get_key()
if samplerowkey in self._sample_rows:
self._sample_rows[samplerowkey].update_sample_row_dict(counted_row_dict)
def get_counted_value(self, selected_dict):
""" """
samplerowkey = SampleRow(selected_dict).get_key()
if samplerowkey in self._sample_rows:
return self._sample_rows[samplerowkey].get_counted_units()
else:
return 0
def update_counted_value_in_core(self, counted_row_dict, value):
""" """
if value == '0':
# Delete row.
samplerowkey = SampleRow(counted_row_dict).get_key()
if samplerowkey in self._sample_rows:
del self._sample_rows[samplerowkey]
return
#
if len(counted_row_dict.get('scientific_full_name', '')) > 0:
samplerowkey = SampleRow(counted_row_dict).get_key()
if samplerowkey not in self._sample_rows:
self._sample_rows[samplerowkey] = SampleRow(counted_row_dict)
# Check if the same method step or locked taxa.
samplerowobject = self._sample_rows[samplerowkey]
# Don't check for validity when the value is the same.
if samplerowobject.get_counted_units() == value:
return
if samplerowobject.is_locked():
raise UserWarning('Selected taxon is locked')
# if counted_row_dict.get('method_step') == samplerowobject.get_method_step():
if True:
samplerowobject.set_counted_units(value)
samplerowobject.update_sample_row_dict(counted_row_dict)
else:
raise UserWarning('Selected taxon is already counted in another method step.')
def update_abundance_class_in_core(self, counted_row_dict, value):
""" """
if value == '0':
# Delete row.
samplerowkey = SampleRow(counted_row_dict).get_key()
if samplerowkey in self._sample_rows:
del self._sample_rows[samplerowkey]
return
#
if len(counted_row_dict.get('scientific_full_name', '')) > 0:
samplerowkey = SampleRow(counted_row_dict).get_key()
if samplerowkey not in self._sample_rows:
self._sample_rows[samplerowkey] = SampleRow(counted_row_dict)
# Check if the same method step or locked taxa.
samplerowobject = self._sample_rows[samplerowkey]
# Don't check for validity when the value is the same.
if samplerowobject.get_counted_units() == value:
return
if samplerowobject.is_locked():
raise UserWarning('Selected taxon is locked')
# if counted_row_dict.get('method_step') == samplerowobject.get_method_step():
if True:
# samplerowobject.set_counted_units(value)
samplerowobject.set_abundance_class(value)
samplerowobject.update_sample_row_dict(counted_row_dict)
else:
raise UserWarning('Selected taxon is already counted in another method step.')
def delete_rows_in_method_step(self, current_method_step):
""" """
for sampleobject in list(self._sample_rows.values()): # Clone list when deleting content.
if sampleobject.get_method_step() == current_method_step:
del self._sample_rows[sampleobject.get_key()]
def update_coeff_for_sample_rows(self, current_method_step, count_area_number, coefficient):
""" """
for sampleobject in self._sample_rows.values():
if sampleobject.get_method_step() == current_method_step:
if not sampleobject.is_locked():
sampleobject.set_count_area_number(count_area_number)
sampleobject.set_coefficient(coefficient)
def import_sample_from_excel(self, excel_file_path):
""" Import from Excel. """
# Sample info.
tablefilereader = toolbox_utils.TableFileReader(
file_path = '',
excel_file_name = excel_file_path,
excel_sheet_name = 'sample_info.txt',
)
sample_header = tablefilereader.header()
sample_rows = tablefilereader.rows()
#
self._tablefilewriter_sample_info.write_file(sample_header, sample_rows)
# Sample data.
tablefilereader = toolbox_utils.TableFileReader(
file_path = '',
excel_file_name = excel_file_path,
excel_sheet_name = 'sample_data.txt',
)
data_header = tablefilereader.header()
data_rows = tablefilereader.rows()
#
self._tablefilewriter_sample_data.write_file(data_header, data_rows)
# Sample method.
tablefilereader = toolbox_utils.TableFileReader(
file_path = '',
excel_file_name = excel_file_path,
excel_sheet_name = 'counting_method.txt',
)
method_header = tablefilereader.header()
method_rows = tablefilereader.rows()
path = os.path.join(self._dataset_dir_path, self._dataset_name, self._sample_name)
tablefilewriter_sample_method = toolbox_utils.TableFileWriter(
file_path = path,
text_file_name = 'counting_method.txt',
)
#
tablefilewriter_sample_method.write_file(method_header, method_rows)
def export_sample_to_excel(self, export_target_dir, export_target_filename):
""" Export to Excel. """
excel_export_writer = ExcelExportWriter(self)
excel_export_writer.to_excel(export_target_dir, export_target_filename)
class SampleRow():
""" Defines the content of one counted sample row. """
def __init__(self, sample_row_dict):
""" """
self._sample_row_dict = {}
self._sample_row_dict.update(sample_row_dict)
#
self._scientific_full_name = self._sample_row_dict.get('scientific_full_name', '')
self._scientific_name = self._sample_row_dict.get('scientific_name', '')
self._size_class = self._sample_row_dict.get('size_class', '')
#
# Get species related dictionaries for this taxon/sizeclass.
self._taxon_dict = plankton_core.Species().get_taxon_dict(self._scientific_name)
self._size_class_dict = plankton_core.Species().get_bvol_dict(self._scientific_name, self._size_class)
self._sample_row_dict['taxon_class'] = self._taxon_dict.get('taxon_class', '')
self._sample_row_dict['unit_type'] = self._size_class_dict.get('bvol_unit', '')
# Trophic type.
if not self._sample_row_dict.get('trophic_type', ''):
trophic_type = self._size_class_dict.get('trophic_type', '')
if not trophic_type:
trophic_type = self._taxon_dict.get('trophic_type', '')
if trophic_type:
self._sample_row_dict['trophic_type'] = trophic_type
#
self._bvol_volume = 0.0
self._bvol_carbon = 0.0
try:
self._bvol_volume = float(self._size_class_dict.get('bvol_calculated_volume_um3', '0').replace(',', '.'))
self._bvol_carbon = float(self._size_class_dict.get('bvol_calculated_carbon_pg', '0').replace(',', '.'))
except Exception as e:
raise UserWarning('Failed to read BVOL volume or carbon. Hint: Save Excel with values, not formulas. Exception: ' + str(e))
self._sample_row_dict['volume_um3_unit'] = str(self._round_value(self._bvol_volume))
self._sample_row_dict['carbon_pgc_unit'] = str(self._round_value(self._bvol_carbon))
def get_sample_row_dict(self):
""" """
# print('DEBUG GET scientific_full_name: ' + self._sample_row_dict.get('scientific_full_name', ''))
# print('DEBUG GET size_class: ' + self._sample_row_dict.get('size_class', ''))
# print('DEBUG GET variable_comment: ' + self._sample_row_dict.get('variable_comment', ''))
return self._sample_row_dict
def update_sample_row_dict(self, sample_row_dict):
""" """
# print('DEBUG UPDATE scientific_full_name: ' + sample_row_dict.get('scientific_full_name', ''))
# print('DEBUG UPDATE size_class: ' + sample_row_dict.get('size_class', ''))
# print('DEBUG UPDATE variable_comment: ' + sample_row_dict.get('variable_comment', ''))
self._sample_row_dict.update(sample_row_dict)
def get_key(self):
""" """
rowkey = self._scientific_full_name + '+' + self._size_class
return rowkey
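# Example (values illustrative): scientific_full_name 'Dinophysis acuta'
# with size_class '3' gives the row key 'Dinophysis acuta+3'.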
def get_scientific_full_name(self):
""" """
return self._scientific_full_name
def get_scientific_name(self):
""" """
return self._scientific_name
def get_size_class(self):
""" """
return self._size_class
def get_method_step(self):
""" """
return self._sample_row_dict.get('method_step', '')
def set_lock(self, locked_at_count_area):
""" """
self._sample_row_dict['locked_at_area'] = locked_at_count_area
def get_locked_at_area(self):
""" """
return self._sample_row_dict.get('locked_at_area', '')
def set_unlock(self):
""" """
self._sample_row_dict['locked_at_area'] = ''
def is_locked(self):
""" """
if self._sample_row_dict.get('locked_at_area', '') == '':
return False
else:
return True
def get_count_area_number(self):
""" """
return self._sample_row_dict.get('count_area_number', '')
def set_count_area_number(self, count_area_number):
""" """
self._sample_row_dict['count_area_number'] = count_area_number
count_area_number = int(count_area_number)
# Adjust length of list for counted per area.
counted_units_list = self._sample_row_dict.get('counted_units_list', None)
if counted_units_list:
counted_units_list = [int(x) for x in counted_units_list.split(';')]
else:
counted_units_list = count_area_number * [0]
counted_units_list[0] = self._sample_row_dict.get('counted_units', '0')
#
if len(counted_units_list) < count_area_number:
counted_units_list += (count_area_number | |
from operator import itemgetter
from collections import OrderedDict
from typing import List, Tuple, Dict, Optional
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torchvision.models.detection.image_list import ImageList
from torchvision.models.detection.rpn import (
RegionProposalNetwork,
concat_box_prediction_layers,
)
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
from torchvision.models.detection.roi_heads import RoIHeads, fastrcnn_loss
from torchvision.ops import boxes as box_ops
class XRegionProposalNetwork(RegionProposalNetwork):
def __init__(self, source):
self.__dict__.update(source.__dict__)
def filter_proposals(
self,
proposals: Tensor,
objectness: Tensor,
image_shapes: List[Tuple[int, int]],
num_anchors_per_level: List[int],
) -> Tuple[List[Tensor], List[Tensor]]:
num_images = proposals.shape[0]
device = proposals.device
# do not backprop through objectness
objectness = objectness.detach()
objectness = objectness.reshape(num_images, -1)
levels = [
torch.full((n,), idx, dtype=torch.int64, device=device)
for idx, n in enumerate(num_anchors_per_level)
]
levels = torch.cat(levels, 0)
levels = levels.reshape(1, -1).expand_as(objectness)
# select top_n boxes independently per level before applying nms
top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)
image_range = torch.arange(num_images, device=device)
batch_idx = image_range[:, None]
objectness = objectness[batch_idx, top_n_idx]
levels = levels[batch_idx, top_n_idx]
proposals = proposals[batch_idx, top_n_idx]
objectness_prob = torch.sigmoid(objectness)
final_boxes = []
final_scores = []
for boxes, scores, lvl, img_shape in zip(
proposals, objectness_prob, levels, image_shapes
):
lvl = torch.zeros_like(lvl)
boxes = box_ops.clip_boxes_to_image(boxes, img_shape)
# remove small boxes
keep = box_ops.remove_small_boxes(boxes, self.min_size)
boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
# remove low scoring boxes
# use >= for Backwards compatibility
keep = torch.where(scores >= self.score_thresh)[0]
boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
# non-maximum suppression; levels were zeroed above, so NMS is
# effectively applied across all levels jointly
keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)
# keep only topk scoring predictions
keep = keep[: self.post_nms_top_n()]
boxes, scores = boxes[keep], scores[keep]
final_boxes.append(boxes)
final_scores.append(scores)
return final_boxes, final_scores
def forward(
self,
images: ImageList,
features: Dict[str, Tensor],
targets: Optional[List[Dict[str, Tensor]]] = None,
) -> Tuple[List[Tensor], Dict[str, Tensor]]:
# modified to also return objectness score
"""
Args:
images (ImageList): images for which we want to compute the predictions
features (Dict[str, Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
correspond to different feature levels
targets (List[Dict[str, Tensor]]): ground-truth boxes present in the image (optional).
If provided, each element in the dict should contain a field `boxes`,
with the locations of the ground-truth boxes.
"""
# RPN uses all feature maps that are available
features = list(features.values())
objectness, pred_bbox_deltas = self.head(features)
anchors = self.anchor_generator(images, features)
num_images = len(anchors)
num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness]
num_anchors_per_level = [
s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors
]
objectness, pred_bbox_deltas = concat_box_prediction_layers(
objectness, pred_bbox_deltas
)
# apply pred_bbox_deltas to anchors to obtain the decoded proposals
# note that we detach the deltas because Faster R-CNN does not backprop through
# the proposals
proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
proposals = proposals.view(num_images, -1, 4)
boxes, scores = self.filter_proposals(
proposals, objectness, images.image_sizes, num_anchors_per_level
)
losses = {}
if self.training:
assert targets is not None
labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
loss_objectness, loss_rpn_box_reg = self.compute_loss(
objectness, pred_bbox_deltas, labels, regression_targets
)
losses = {
"loss_objectness": loss_objectness,
"loss_rpn_box_reg": loss_rpn_box_reg,
}
return {"boxes": boxes, "scores": scores, "losses": losses}
class XRoIHeads(RoIHeads):
def __init__(self, source):
self.__dict__.update(source.__dict__)
def postprocess_detections(
self,
class_logits, # type: Tensor
box_regression, # type: Tensor
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
):
# so far identical to postprocess_detections from the torchvision implementation
# type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
device = class_logits.device
num_classes = class_logits.shape[-1]
boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
pred_boxes = self.box_coder.decode(box_regression, proposals)
pred_scores = F.softmax(class_logits, -1)
pred_boxes_list = pred_boxes.split(boxes_per_image, 0)
pred_scores_list = pred_scores.split(boxes_per_image, 0)
all_boxes = []
all_scores = []
all_labels = []
for boxes, scores, image_shape in zip(
pred_boxes_list, pred_scores_list, image_shapes
):
boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
# create labels for each prediction
labels = torch.arange(num_classes, device=device)
labels = labels.view(1, -1).expand_as(scores)
# remove predictions with the background label
boxes = boxes[:, 1:]
scores = scores[:, 1:]
labels = labels[:, 1:]
# batch everything, by making every class prediction be a separate instance
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
# remove low scoring boxes
inds = torch.where(scores > self.score_thresh)[0]
boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
# remove empty boxes
keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
# non-maximum suppression, independently done per class
keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
# keep only topk scoring predictions
keep = keep[: self.detections_per_img]
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
all_boxes.append(boxes)
all_scores.append(scores)
all_labels.append(labels)
return all_boxes, all_scores, all_labels
def forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# modified to return box features
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
targets (List[Dict])
"""
if targets is not None:
for t in targets:
# TODO: https://github.com/pytorch/pytorch/issues/26731
floating_point_types = (torch.float, torch.double, torch.half)
assert (
t["boxes"].dtype in floating_point_types
), "target boxes must of float type"
assert (
t["labels"].dtype == torch.int64
), "target labels must of int64 type"
if self.has_keypoint():
assert (
t["keypoints"].dtype == torch.float32
), "target keypoints must of float type"
if self.training:
(
proposals,
matched_idxs,
labels,
regression_targets,
) = self.select_training_samples(proposals, targets)
else:
labels = None
regression_targets = None
matched_idxs = None
box_features0 = self.box_roi_pool(features, proposals, image_shapes)
## box_features0 is 256x7x7 per proposal (the 7x7 comes from the box_roi_pool output size).
# It is then flattened into a single 1024-dimensional vector by the box_head.
box_features = self.box_head(box_features0)
# the box predictor outputs one box per class (it is category dependent).
class_logits, box_regression = self.box_predictor(box_features)
result: List[Dict[str, torch.Tensor]] = []
losses = {}
if self.training:
assert labels is not None and regression_targets is not None
loss_classifier, loss_box_reg = fastrcnn_loss(
class_logits, box_regression, labels, regression_targets
)
losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg}
else:
boxes, scores, labels = self.postprocess_detections(
class_logits, box_regression, proposals, image_shapes
)
num_images = len(boxes)
for i in range(num_images):
result.append(
{
"boxes": boxes[i],
"labels": labels[i],
"scores": scores[i],
}
)
return {"result": result, "losses": losses, "box_features": box_features}
class XGeneralizedRCNN(GeneralizedRCNN):
def __init__(self, source):
self.__dict__.update(source.__dict__)
self.rpn = XRegionProposalNetwork(self.rpn)
self.roi_heads = XRoIHeads(self.roi_heads)
def forward(self, images, targets=None):
# modify to use modified RPN, and roi_heads
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[str, Tensor]]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
During testing, it returns list[BoxList] contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and targets is None:
raise ValueError("In training mode, targets should be passed")
if self.training:
assert targets is not None
for target in targets:
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
raise ValueError(
f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}."
)
else:
raise ValueError(
f"Expected target boxes to be of type Tensor, got {type(boxes)}."
)
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
assert len(val) == 2
original_image_sizes.append((val[0], val[1]))
images, targets = self.transform(images, targets)
# Check for degenerate boxes
# TODO: Move this to a function
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
raise ValueError(
"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}."
)
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
# proposals, proposal_losses = self.rpn(images, features, targets)
rpn_out = self.rpn(images, features, targets)
proposals, proposal_losses, objectness_scores = itemgetter(
"boxes", "losses", "scores"
)(rpn_out)
# transpose from per-key lists to a per-image list of dicts, matching the detections format
rpn_out_transpose = []
for i in range(len(rpn_out["boxes"])):
rpn_out_transpose.append(
{"boxes": proposals[i], "scores": objectness_scores[i]}
)
## will rescale boxes to original size
proposals_out = self.transform.postprocess(
rpn_out_transpose, images.image_sizes, original_image_sizes
)
# detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
roi_heads_out = self.roi_heads(features, proposals, images.image_sizes, targets)
detections, detector_losses, box_features = itemgetter(
"result", "losses", "box_features"
)(roi_heads_out)
boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]
<filename>src/python/packages/metadapter/processors/mdoAzimuth_processor.py
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 03 2017
@author: <NAME>
"""
import numpy
import math
from sys import version_info
if version_info.major <= 2:
import OSC
else:
# Use the self-made port for Python 3 (experimental)
from ..thirdparty import OSC as OSC
#from metadata_processor_interface import MetadataProcessorInterface
# Metadapter2 version:
from metadapter import SequenceProcessorInterface
# FUNCTION to convert radians to degrees
def rad2deg( w ):
return 180.0/math.pi * w
# FUNCTION to convert degrees to radians
def deg2rad( w ):
return w / 180.0*math.pi
# FUNCTION to convert spherical degrees to cartesian
def sphDeg2cart(az,el,r):
az = deg2rad(az)
el = deg2rad(el)
z = r * math.sin(el)
rcoselev = r * math.cos(el)
x = rcoselev * math.cos(az)
y = rcoselev * math.sin(az)
return x, y, z
# FUNCTION to convert cartesian to degrees
def cart2sphDeg(x,y,z):
XsqPlusYsq = x**2 + y**2
r = math.sqrt(XsqPlusYsq + z**2) # r
elev = math.atan2(z,math.sqrt(XsqPlusYsq)) # theta
az = math.atan2(y,x) # phi
return r, rad2deg(az), rad2deg(elev)
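# Quick sanity check (illustrative): spherical -> cartesian -> spherical should round-trip,
# e.g.
#   x, y, z = sphDeg2cart(30.0, 10.0, 2.0)
#   r, az, el = cart2sphDeg(x, y, z)   # ~ (2.0, 30.0, 10.0)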
# FUNCTIONS to convert between dB and linear
def lin2dB( linVal ):
return 20.0 * math.log10( linVal )
def dB2lin( dbVal ):
return math.pow( 10.0, 0.05 * dbVal )
def send_loudspeaker_content(string,osc_address,host,port):
c = OSC.OSCClient()
c.connect((host, port))
oscmsg = OSC.OSCMessage()
oscmsg.setAddress(osc_address)
oscmsg.append(string)
c.send(oscmsg)
# FUNCTION to parse room description .json file (specified by a key input to the metadapter)
def Parse_Room_Description(filename):
import json
# Set up room object dictionary
room_obj_list = ['None','Bed','Books','Ceiling','Chair','Floor','Furniture','Objects','Picture','Sofa','Table','TV','Unknown','Wall','Window','Any']
room_obj_dict = {key: value for (value, key) in enumerate(room_obj_list)}
#print( room_obj_dict['chair'] )
if filename == 'None':
print( "MDO processor: no room description file (.json) specified" )
return None
# Read the file
with open(filename) as data_file:
data = json.load(data_file)
#r = list(data)
# Preallocate a list
room_objects = []
print( "MDO processor: found some room objects (in %s):" % filename )
for x in list(data):
if 'Category' in data[x]:
object_category = data[x]['Category'].encode('ascii')
if object_category not in room_obj_list:
object_category = "Unknown"
else: # No category specified
object_category = "Unknown"
room_objects.append( dict(
name=x.encode('ascii'),
category=object_category,
room_obj_num=room_obj_dict[object_category],
x=data[x]['Centre_x'],
y=data[x]['Centre_y'],
z=data[x]['Centre_z'] ) )
print('')
print( "Found %i room objects:" % len(room_objects) )
for i, ro in enumerate(room_objects):
print( " %i. Name: %s. Category: %s (category number %i)" % (i+1, ro['name'], ro['category'], float(ro['room_obj_num'])))
print('')
return room_objects
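# Illustrative example (not from the original file) of the room description JSON this
# parser expects; keys are taken from the parsing code above, values are made up:
#
#   {
#     "Sofa_1":  { "Category": "Sofa", "Centre_x": 1.2, "Centre_y": -0.5, "Centre_z": 0.4 },
#     "TV_main": { "Category": "TV",   "Centre_x": 0.0, "Centre_y":  2.0, "Centre_z": 1.0 }
#   }
#
# Any "Category" not found in room_obj_list above is mapped to "Unknown".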
# FUNCTION to parse loudspeaker config .xml file
def Parse_Loudspeaker_Config(filename):
if filename == 'None':
print( "MDO processor: no loudspeaker config file (.xml) specified" )
return None
import xml.etree.ElementTree as ET
# Read the file
tree = ET.parse(filename)
root = tree.getroot()
# Preallocate a list of loudspeakers
loudspeakers = []
for child in root:
if child.tag == "loudspeaker": # found a loudspeaker
if 'name' in child.attrib:
name = child.attrib['name']
else:
name = 'no name' # Unknown name is set as 'no name'
if 'mdo' in child.attrib: # If the MDO attribute exists
mdo = int(child.attrib['mdo']) # ...see what it is
else: # Otherwise, it doesn't exist
mdo = 0 # ...set MDO to zero
if 'quality' in child.attrib:
quality = child.attrib['quality']
else:
quality = 'unknown' # Unknown quality is set as unknown
if 'type' in child.attrib:
type = child.attrib['type']
else:
type = 'unknown'
if 'switch' in child.attrib:
switch = int(child.attrib['switch'])
else:
switch = 1
if 'function' in child.attrib:
function = child.attrib['function']
else:
function = 'unknown'
id = int(child.attrib['id'])
channel = int(child.attrib['channel'])
# Get the loudspeaker position
for lschild in child:
#print( lschild.attrib )
if lschild.tag == "polar":
x,y,z = sphDeg2cart(float(lschild.attrib['az']),float(lschild.attrib['el']),float(lschild.attrib['r']))
elif lschild.tag == "cart":
x,y,z = float(lschild.attrib['x']),float(lschild.attrib['y']),float(lschild.attrib['z'])
#print( x,y,z )
# Compile the loudspeaker list of dictionaries
loudspeakers.append( dict(name=name, id=id, channel=channel, mdo=mdo, quality=quality, type=type, x=x, y=y, z=z, switch=switch, function=function) )
#for l in loudspeakers:
#print( l )
#print "Found a loudspeaker. MDO: %i" % int(mdo)
# See if it should be used for MDO.
# If so, get its position, quality, type, channel
print('')
print("Found %i loudspeakers:" % len(loudspeakers))
for i, ls in enumerate(loudspeakers):
print(' %i. Name: %s. Type: %s. Quality: %s. MDO loudspeaker: %s. x: %2.2f y: %2.2f z: %2.2f' % (i+1, ls['name'], ls['type'], ls['quality'], ['no','yes'][ls['mdo']], ls['x'], ls['y'], ls['z']))
print('')
return loudspeakers
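# Illustrative example (not from the original file) of the loudspeaker config XML this
# parser expects; attribute names are taken from the parsing code above, values are made
# up, and the root tag name is arbitrary (only children tagged "loudspeaker" are read):
#
#   <config>
#     <loudspeaker name="L1" id="1" channel="1" mdo="1" quality="high" type="satellite"
#                  switch="1" function="primary">
#       <polar az="30" el="0" r="1.5"/>   <!-- alternatively: <cart x="1.0" y="0.6" z="0.0"/> -->
#     </loudspeaker>
#   </config>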
# FUNCTION: find nearest loudspeaker (return the channel number)
def Find_Nearest_Loudspeaker(point,loudspeakers):
norm_dist = True
if norm_dist == True:
#print point
mag = math.sqrt(point[0]**2 + point[1]**2 + point[2]**2)
point = [n/mag for n in point]
#print point
if type(point) is list:
# Convert list to tuple
point = tuple(point)
from scipy.spatial import distance
dist = []
az_diff = []
ls_az_list = []
for l in loudspeakers:
# Get the loudspeaker position
if norm_dist == True:
l_norm = [l['x'], l['y'], l['z']]
mag = math.sqrt(l_norm[0]**2 + l_norm[1]**2 + l_norm[2]**2)
l_norm = [n/mag for n in l_norm]
ls_coords = (l_norm[0], l_norm[1], l_norm[2])
else:
ls_coords = (l['x'], l['y'], l['z'])
# Get speaker azimuths
ls_az = math.atan2(l['y'],l['x'])%(2*math.pi)
ls_az_list.append(ls_az*180/math.pi)
#print ls_az*180/math.pi
# Get point azimuth
point_az = math.atan2(point[1],point[0])%(2*math.pi)
# Calculate the euclidean distance
dist.append(distance.euclidean(point,ls_coords))
# Calculate angle differences
az_diff.append(abs(ls_az - point_az))
# Print
#print("Loudspeaker %i (%s) position: %s" %(int(l['id']),l['name'],ls_coords))
if len(dist) == 0:
closest_channel = 'None'
else:
# Find the loudspeaker with the smallest euclidean distance
closest = dist.index(min(dist)) # For now, just get the first minimum distance
closest_az = az_diff.index(min(az_diff))
#print "Target azimuth:"
#print point_az*180/math.pi
#print "Closest LS azimuth:"
#print ls_az_list[closest_az]
# Get the ID from the closest loudspeaker
closest_channel = loudspeakers[closest]['id']
# Return the id of the closest loudspeaker
return closest_channel
# FUNCTION: list acceptable loudspeakers
def List_Acceptable_Loudspeakers(loudspeakers,criterion,value,force_mdo,obj):
if criterion == 'Any':
acceptable_loudspeakers = loudspeakers
else:
acceptable_loudspeakers = [l for l in loudspeakers if l[criterion] == value]
# If MDO is being forced, remove non-MDO loudspeakers from acceptable_loudspeakers
if force_mdo:
acceptable_loudspeakers = [l for l in acceptable_loudspeakers if l['mdo'] == 1]
# Certain object types can only be in loudspeakers of a certain function
if 'function' in obj:
function = obj['function']
else:
function = 'any'
if function=='narrator':
acceptable_loudspeakers = [l for l in acceptable_loudspeakers if l['function'] == 'primary']
elif function=='ambience':
acceptable_loudspeakers = [l for l in acceptable_loudspeakers if l['function'] == 'secondary']
# Loudspeaker switch - MDO loudspeakers can be turned on or off
acceptable_loudspeakers = [l for l in acceptable_loudspeakers if l['switch'] == 1]
return acceptable_loudspeakers
# FUNCTION: get object position
def Get_Object_Position(object,coordinate_system):
#print('*** In Get_Object_Position ***')
if 'direction' in object:
# Spherical coordinates in object
position_type = 'spherical'
object_position = (object['direction']['az'],object['direction']['el'],object['direction']['refdist'])
elif 'position' in object and 'az' in object['position']:
position_type = 'spherical'
object_position = (object['position']['az'],object['position']['el'],object['position']['refdist'])
elif 'position' in object and 'x' in object['position']:
# Cartesian coordinates in object
position_type = 'cartesian'
object_position = (object['position']['x'],object['position']['y'],object['position']['z'])
else:
# No position data (e.g. it's a diffuse object, HOA source, or channel object)
position_type = 'cartesian'
object_position = (0,0,0) # TODO: At the moment this just returns at the origin
# Convert the object position to the desired coordinate system
if position_type == 'cartesian' and coordinate_system == 'spherical':
az,el,refdist = cart2sphDeg(object_position[0],object_position[1],object_position[2])
position = (az,el,refdist)
elif position_type == 'spherical' and coordinate_system == 'cartesian':
x,y,z = sphDeg2cart(object_position[0],object_position[1],object_position[2])
position = (x,y,z)
else:
position = object_position
# Convert object position to floats
position = [float(p) for p in position]
#print('*** Exiting Get_Object_Position ***')
return position
def Print_ObjectVectorMessage(objectVector,loudspeakers):
if loudspeakers != None:
mdo_speakers = List_Acceptable_Loudspeakers(loudspeakers, 'Any', 'Any', 1, {})
else:
return
print("Sent object vector with %i objects. %i loudspeakers available for MDO:" % (len(objectVector), len(mdo_speakers)))
for speaker in mdo_speakers:
object_string = ''
for obj in objectVector:
if obj['type'] != 'channel' or int(obj['outputChannels']) != int(speaker['id']):
continue
else:
if 'content_label' in obj:
object_string = object_string + obj['content_label'] + ', '
else:
#object_string = object_string + '(unlabelled object, ID: ' + str(obj['id']) + ', '
object_string = object_string + str(obj['id']) + ', '
object_string = object_string[:-2] # Remove the last ', '
print(" Speaker %i (%s): %s" % (speaker['id'], speaker['name'], object_string))
oscaddress = "/speaker%i" % int(speaker['id'])
# Send OSC message to interface
send_loudspeaker_content(object_string,oscaddress,'127.0.0.1',4555)
print('')
return
class MDOAzimuthProcessor(SequenceProcessorInterface):
def __init__(self, arguments ):
SequenceProcessorInterface.__init__(self, arguments)
self.room_filename = 'None'
self.loudspeaker_config = 'None'
if 'on' in arguments.attrib:
self.on = int(arguments.attrib['on'])
else:
self.on = 1 # Default is on
if 'priority' in arguments.attrib:
self.priority = arguments.attrib['priority']
else:
self.priority = 'quality' # Default is 'quality'
if 'loudspeakerconfig' in arguments.attrib:
self.loudspeaker_config = arguments.attrib['loudspeakerconfig']
else:
self.loudspeaker_config = 'None'
if 'roomdescription' in arguments.attrib:
self.room_filename = arguments.attrib['roomdescription']
else:
self.room_filename = 'None'
# R² score (coefficient of determination) for both samples
st.write("R² score train modèle Lasso : ",r2_score(y_train2, pred_train2).round(5))
st.write("R² score test modèle Lasso : ",r2_score(y_test2, pred_test2).round(5))
st.write("Le score R² est très bon sur les 2 échantillons (valeur à maximiser , entre 0 et 1).")
st.write("")
# MAPE (Mean Absolute Percentage Error) for both samples
st.write("MAPE train modèle Lasso : ", mean_absolute_percentage_error(y_train2, pred_train2).round(5))
st.write("MAPE test modèle Lasso : ", mean_absolute_percentage_error(y_test2, pred_test2).round(5))
st.write("La MAPE est très bonne sur les 2 échantillons (valeur à minimiser). \n")
st.write("")
# reshape the predictions and the actual values
pred_test2_resh = np.reshape(pred_test2, 2631)
y_test2_resh = np.reshape(y_test2, 2631)
st.write("")
st.subheader("Evaluation du modèle Lasso par contrôle des Consommation réelles vs Consommation prédites sur l'échantillon de test")
# create the mean and standard deviation variables
st.write("Création des variables moyenne et écart type à partir du scaler")
moy2 = scaler2.mean_[-1]
ec2 = scaler2.scale_[-1]
st.write("moyenne :", moy2.round(3))
st.write("ecart-type :", ec2.round(3))
st.write("")
# Display the observed consumption and the consumption predicted by the Lasso model.
df_results2 = pd.DataFrame({'Consommations_observées_(MW)': (y_test2_resh*ec2)+moy2,
'Consommations_prédites_(MW)' : (pred_test2_resh*ec2)+moy2},
index=X_test2.index)
st.write(df_results2.head(50))
st.write("")
st.write("Comme pour le modèle Ridge, On observe des écarts importants entre les valeurs réelles et les valeurs prédites. \n"
"Il semble que le modèle ne soit pas aussi performant qu'observé avec les métriques.")
st.write("")
st.write("")
st.markdown("____")
st.subheader("Le modèle Elastic Net")
st.markdown("__Pourquoi tester un Elastic Net ?__")
st.write("Dans les paragraphes ci-dessus, nous avons vu que la régression Ridge utilise la pénalité L2 et la régression Lasso utilise la pénalité L1. \n"
"Le modèle ElasticNet offre pour sa part les avantages d'une combinaison linéaire des pénalités L1 et L2.")
st.write("")
st.write("__Etapes :__ \n"
" - la normalisation du dataframe avec un MinMax scaler \n"
" - la sélection des features et de la target \n"
" - le split des échantillons \n"
" - la création du modèle Elastic Net avec Cross Validation \n"
" - l'entrainement du modèle")
st.write("")
with st.echo():
# Normalisation with the MinMax scaler
# no normal distribution on the features, apart from a bimodal distribution on Tmoy
scaler3 = preprocessing.MinMaxScaler()
data_scaled3 = pd.DataFrame(scaler3.fit_transform(data_4Reg), index=data_4Reg.index, columns=data_4Reg.columns)
# Select the features and the target
features3 = data_scaled3.drop(['Consommation (MW)'], axis=1)
target3 = data_scaled3['Consommation (MW)']
# Split into train and test samples with shuffle=False
X_train3, X_test3, y_train3, y_test3 = train_test_split(features3, target3,
test_size=0.2, random_state=44,
shuffle=False)
# Create the ElasticNet model
model_ElNet = ElasticNetCV(cv=10, l1_ratio=(0.1, 0.25, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.99),
alphas=(0.001, 0.01, 0.02, 0.025, 0.05, 0.1, 0.25, 0.5, 0.8, 1.0),
max_iter=1000000000)
# Train the model
model_ElNet.fit(X_train3, y_train3)
st.write("")
st.write("")
st.write("__Affichage de l'intercept et des coefficients estimés pour chaque variable__")
st.write("")
# Display the intercept and the estimated coefficients for each variable
coeffs3 = list(model_ElNet.coef_)
coeffs3.insert(0, model_ElNet.intercept_)
feats3 = list(features3.columns)
feats3.insert(0, 'intercept')
df_coeffs = pd.DataFrame({'valeur estimée': coeffs3}, index=feats3)
st.write(df_coeffs)
st.write("Observations : la population, la température et les ETI du secteur tertiaire influencent le plus le modèle au regard des coefficients renvoyés.")
st.write("")
st.write("__Affichage du coefficient alpha sélectionné par le modèle__")
#alpha3 = model_ElNet.alpha_
#alpha3 = str(alpha3)
#str_alpha = "Coefficient alpha sélectionné par la Cross Validation du modèle : "+alpha3
st.write("Coefficient alpha sélectionné par la Cross Validation du modèle : ", model_ElNet.alpha_)
# format Dataframe
y_train3 = pd.DataFrame(y_train3)
y_test3 = pd.DataFrame(y_test3)
# predictions, converted to DataFrames
pred_train3 = model_ElNet.predict(X_train3)
pred_test3 = model_ElNet.predict(X_test3)
pred_train3 = pd.DataFrame(pred_train3, columns = y_train3.columns, index=y_train3.index)
pred_test3 = pd.DataFrame(pred_test3, columns = y_test3.columns, index=y_test3.index)
st.write("")
st.subheader("Evaluation du modèle avec les métriques")
# MSE (Mean Squared Error) for train and test
st.write("MSE train modèle Elastic Net: ",mean_squared_error(y_train3, pred_train3).round(5))
st.write("MSE test modèle Elastic Net : ",mean_squared_error(y_test3, pred_test3).round(5))
st.write("La MSE est excellente sur les 2 échantillons (valeur à minimiser).")
st.write("")
# R² score (coefficient of determination) for both samples
st.write("R² score train modèle Elastic Net : ",r2_score(y_train3, pred_train3).round(5))
st.write("R² score test modèle Elastic Net : ",r2_score(y_test3, pred_test3).round(5))
st.write("Le score R² est très bon sur les 2 échantillons (valeur à maximiser , entre 0 et 1).")
st.write("")
# MAPE (Mean Absolute Percentage Error) for both samples
st.write("MAPE train modèle Elastic Net : ", mean_absolute_percentage_error(y_train3, pred_train3).round(5))
st.write("MAPE test modèle Elastic Net : ", mean_absolute_percentage_error(y_test3, pred_test3).round(5))
st.write("la MAPE est anormalement haute sur l'échantillon train et très bonne sur l'échantillon de test (valeur à minimiser). \n"
"L'anomalie sur la MAPE train s'explique par le mode de calul et la présence d'un 0 dans l'échantillon train.")
# create the min and max variables
min_ElNet = scaler3.data_min_[0]
max_ElNet = scaler3.data_max_[0]
st.write("")
st.write("")
st.subheader("Evaluation du modèle Elastic Net par contrôle des Consommation réelles vs Consommation prédites sur l'échantillon de test")
# Display the observed consumption and the consumption predicted by the Elastic Net model.
df_results3 = pd.DataFrame({'Consommations_observées_(MW)': (y_test3['Consommation (MW)']*(max_ElNet-min_ElNet))+min_ElNet,
'Consommations_prédites_(MW)' : (pred_test3['Consommation (MW)']*(max_ElNet-min_ElNet))+min_ElNet},
index=y_test3.index)
st.write(df_results3.head(50))
st.write("")
st.write("Comme pour le Ridge et le Lasso, on observe des écarts importants entre les valeurs réelles et les valeurs prédites \n"
"Il semble que le modèle ne soit pas aussi performant qu'observé avec les métriques.")
st.write("")
df_results3["pct_error"] = (df_results3["Consommations_prédites_(MW)"] - df_results3["Consommations_observées_(MW)"]) / df_results3["Consommations_observées_(MW)"] * 100
df_results3.insert(0, "Région", df_results3.index)
df_results3["Région"] = df_results3["Région"].str[13:]
df_results3.insert(1, "Date", df_results3.index)
df_results3["Date"] = df_results3["Date"].str[0:11]
df_results3 = df_results3.reset_index()
df_results3 = df_results3.drop(['index'], axis=1)
st.subheader("Analyse statistique des erreurs de prédiction")
# Look at the error statistics per region on the test sample
p = sns.catplot(x='Région', y='pct_error', kind='violin',inner=None, data=df_results3, height=8, aspect=12/8)
sns.swarmplot(x='Région', y='pct_error', size=3, color='black', alpha=0.7, data=df_results3, ax=p.ax)
plt.xticks(rotation=70)
plt.ylabel("Erreur de prédiction en %")
plt.ylim(-80, 100)
plt.title("Analyse statistique de l'erreur de prédiction en fonction de la Région")
st.pyplot()
st.write("Cette analyse statistique laisse apparaitre plusieurs profils de régions : \n"
" - Des régions avec une distribution « compactes » telles que l’Auvergne-Rhône-Alpes ou les Hauts-de-France. \n"
" - Des régions avec une distribution « étendue » comme la Bourgogne-Franche-Comté ou le Centre-Val de Loire. \n"
" - Des régions avec une distribution « intermédiaire » comme la Normandie ou l’Île-de-France. \n"
"De plus, on observe que quelque soit le profil, les erreurs de prédiction sont nombreuses et loin d’être faibles (supérieur à 5%).")
st.write("")
st.subheader("Observation de l'erreur de prédiction sur 2 Régions")
# prepare the DataFrame and plot for the Bourgogne-Franche-Comté region
df_results3_BFC = df_results3[df_results3['Région'] == "Bourgogne-Franche-Comté"]
df_results3_BFC_chrono = df_results3_BFC.sort_values(by = ['Date'], ascending = True)
df_results3_BFC_chrono.plot(x='Date', y=['Consommations_observées_(MW)', 'Consommations_prédites_(MW)'],
style = ["b-d", "g-h"],
title = "Consommation observé vs prédite - Région Bourgogne-Franche-Comté", figsize = (16,10))
st.pyplot()
st.write("")
# prepare the DataFrame and plot for the Hauts-de-France region
df_results3_HDF = df_results3[df_results3['Région'] == "Hauts-de-France"]
df_results3_HDF_chrono = df_results3_HDF.sort_values(by = ['Date'], ascending = True)
df_results3_HDF_chrono.plot(x='Date', y=['Consommations_observées_(MW)', 'Consommations_prédites_(MW)'],
style = ["b-d", "g-h"],
title = "Consommation observé vs prédite - Région Hauts-de-France", figsize = (16,10))
st.pyplot()
st.write("On note que quelque soit le profil, les prédictions ne sont pas bonnes avec des écarts importants entre les prédictions et le réel.")
st.write("")
st.write("")
st.write("")
st.subheader("Conclusion sur les modèles de Régression régularisée")
st.write("")
st.write("Malgré des métriques exceptionnellement bonnes et qui laissent penser que les modèles sont performants, \n"
"il apparait que les prédictions sont totalement en écart avec les valeurs observées. \n"
"Cette divergence entre métriques et performance pose une problématique importante sur laquelle nous ne pouvons émettre que des hypothèses. \n")
st.write("Hypothèse 1 : le type de modèle serait inadapté à la relation entre la variable cible et les variables explicatives. Il nous faudrait donc choisir d’autres modèles à tester. \n"
" \n"
"Hypothèse 2 : les variables explicatives utilisées pour les modèles sont insuffisamment représentatives ou portent des biais. \n")
st.write("La divergence observée entre les métriques et les résultats de prédiction démontre qu'il ne faut jamais partir du principe | |
n
except NameError:
raise NameError('Inputs N_e, temperature, and n must be specified')
if not isinstance(n, (int, NP.ndarray)):
raise TypeError('Input n must be an integer or numpy array')
n = NP.asarray(n).reshape(-1)
if NP.any(n < 1):
raise ValueError('Lower electron level must be greater than 1')
if not isinstance(N_e, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input N_e must be a scalar or a numpy array')
if not isinstance(N_e, units.Quantity):
N_e = NP.asarray(N_e).reshape(-1) / units.cm**3
else:
N_e = units.Quantity(NP.asarray(N_e.value).reshape(-1), N_e.unit)
if NP.any(N_e <= 0.0/units.cm**3):
raise ValueError('Input N_e must be positive')
if not isinstance(temperature, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input temperature must be a scalar or a numpy array')
if not isinstance(temperature, units.Quantity):
temperature = NP.asarray(temperature).reshape(-1) * units.Kelvin
else:
temperature = units.Quantity(NP.asarray(temperature.value).reshape(-1), temperature.unit)
if NP.any(temperature <= 0.0*units.Kelvin):
raise ValueError('Input temperature must be positive')
if not isinstance(reference, str):
raise TypeError('Input reference must be a string')
if reference.lower() not in ['g67', 'bs71']:
raise ValueError('Specified reference invalid')
if reference.lower() == 'g67':
if nu_0 is None:
raise TypeError('Input nu_0 must be specified')
if not isinstance(nu_0, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input nu_0 must be a scalar or a numpy array')
if not isinstance(nu_0, units.Quantity):
nu_0 = NP.asarray(nu_0).reshape(-1) * units.Hertz
else:
nu_0 = units.Quantity(NP.asarray(nu_0.value).reshape(-1), nu_0.unit)
if NP.any(nu_0 <= 0.0*units.Hertz):
raise ValueError('Input nu_0 must be positive')
if nu_0.size != n.size:
if nu_0.size != 1:
raise ValueError('Input nu_0 must contain one or same number of elements as input n')
if temperature.size != n.size:
if temperature.size != 1:
raise ValueError('Input temperature must contain one or same number of elements as input n')
if N_e.size != n.size:
if N_e.size != 1:
raise ValueError('Input N_e must contain one or same number of elements as input n')
if reference.lower() == 'g67':
warnings.warn('Pressure broadening results based on Griem (1967) only valid for Hydrogen, low temperatures and large n')
dnu_FWHM = (5.0/3.0/NP.sqrt(2.0*NP.pi)) * (FCNST.hbar/FCNST.m_e)**2 * NP.sqrt(FCNST.m_e/FCNST.k_B/temperature) * N_e * n**4 * (0.5 + NP.log(FCNST.k_B*temperature/(3.0*FCNST.hbar*nu_0*n**2)))
if NP.any(dnu_FWHM <= 0.0 * units.Hertz):
raise ValueError('Some values of dnu_FWHM found to be not positive. Check validity of inputs and conditions under which equations are valid.')
else:
warnings.warn('Pressure broadening results based on Brocklehurst & Seaton (1971) only valid for Hydrogen, low temperatures, large n and n >> dn')
dnu_FWHM = 3.74e-8*units.Hertz * (N_e / (1.0 * units.cm**-3)) / (temperature / (1.0*units.Kelvin))**0.1 * (1.0*n)**4.4
return dnu_FWHM
###############################################################################
def lorentzian_line_profile(nu_0, dnu_FWHM, nu=None):
"""
---------------------------------------------------------------------------
Estimate the Lorentzian line profile at given frequencies, given the center
frequency and the frequency FWHM
# Reference: Shaver (1975)
Inputs:
nu_0 [scalar or numpy array] Line-center frequency (in Hz).
Could also be specified as an instance of class
astropy.units.Quantity
dnu_FWHM [scalar or numpy array] Frequency FWHM (in Hz). Could also
be specified as an instance of class astropy.units.Quantity
If specified as an array, it must be of same size as nu_0
nu [scalar or numpy array] Frequency (Hz) at which line
profile is to be estimated. If specified as numpy array,
it must be of same size as input nu_0. Could also be
specified as an instance of class astropy.units.Quantity
If not specified, line profiles at the line centers (nu_0)
will be calculated.
Output:
Normalized Lorentzian line profile. Same size as input nu_0. It will
be returned as an instance of class astropy.units.Quantity. It will have
units of 'second'
---------------------------------------------------------------------------
"""
try:
nu_0, dnu_FWHM
except NameError:
raise NameError('Input nu_0 and dnu_FWHM must be specified')
if not isinstance(dnu_FWHM, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input dnu_FWHM must be a scalar or a numpy array')
if not isinstance(dnu_FWHM, units.Quantity):
dnu_FWHM = NP.asarray(dnu_FWHM).reshape(-1) * units.Hertz
else:
dnu_FWHM = units.Quantity(NP.asarray(dnu_FWHM.value).reshape(-1), dnu_FWHM.unit)
if NP.any(dnu_FWHM <= 0.0*units.Hertz):
raise ValueError('Input dnu_FWHM must be positive')
if not isinstance(nu_0, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input nu_0 must be a scalar or a numpy array')
if not isinstance(nu_0, units.Quantity):
nu_0 = NP.asarray(nu_0).reshape(-1) * units.Hertz
else:
nu_0 = units.Quantity(NP.asarray(nu_0.value).reshape(-1), nu_0.unit)
if NP.any(nu_0 <= 0.0*units.Hertz):
raise ValueError('Input nu_0 must be positive')
if dnu_FWHM.size != nu_0.size:
if (dnu_FWHM.size != 1) and (nu_0.size != 1):
raise ValueError('Input dnu_FWHM must contain one or same number of elements as input nu_0')
if nu is None:
nu = nu_0
else:
if not isinstance(nu, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input nu must be a scalar or a numpy array')
if not isinstance(nu, units.Quantity):
nu = NP.asarray(nu).reshape(-1) * units.Hertz
else:
nu = units.Quantity(NP.asarray(nu.value).reshape(-1), nu.unit)
if NP.any(nu <= 0.0*units.Hertz):
raise ValueError('Input nu must be positive')
if nu.size != nu_0.size:
if (nu.size != 1) and (nu_0.size != 1):
raise ValueError('Input nu must contain one or same number of elements as input nu_0')
gamma = 0.5 * dnu_FWHM
line_profile = (gamma / NP.pi) / ((nu-nu_0)**2 + gamma**2)
return line_profile.decompose()
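# Illustrative check: at the line centre the Lorentzian profile equals 2 / (pi * dnu_FWHM),
# e.g. lorentzian_line_profile(1.4e9, 1.0e4) is ~6.37e-5 s for a 10 kHz FWHM.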
###############################################################################
def voight_line_profile(nu_0, gaussian_dnu_FWHM, lorentzian_dnu_FWHM, nu=None):
"""
---------------------------------------------------------------------------
Estimate the Voigt line profile at given frequencies, given the center
frequency and the Gaussian and Lorentzian frequency FWHMs
# Reference: Shaver (1975)
Inputs:
nu_0 [scalar or numpy array] Line-center frequency (in Hz).
Could also be specified as an instance of class
astropy.units.Quantity
gaussian_dnu_FWHM
[scalar or numpy array] Frequency FWHM (in Hz) of the
Gaussian profile. Could also be specified as an instance of
class astropy.units.Quantity If specified as an array, it
must be of same size as nu_0
lorentzian_dnu_FWHM
[scalar or numpy array] Frequency FWHM (in Hz) of the
Lorentzian profile. Could also be specified as an instance
of class astropy.units.Quantity If specified as an array,
it must be of same size as nu_0
nu [scalar or numpy array] Frequency (Hz) at which line
profile is to be estimated. If specified as numpy array,
it must be of same size as input nu_0. Could also be
specified as an instance of class astropy.units.Quantity
If not specified, line profiles at the line centers (nu_0)
will be calculated.
Output:
Normalized Voigt line profile. Same size as input nu_0. It will
be returned as an instance of class astropy.units.Quantity. It will have
units of 'second'
---------------------------------------------------------------------------
"""
try:
nu_0, gaussian_dnu_FWHM, lorentzian_dnu_FWHM
except NameError:
raise NameError('Input nu_0, gaussian_dnu_FWHM and lorentzian_dnu_FWHM must be specified')
if not isinstance(gaussian_dnu_FWHM, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input gaussian_dnu_FWHM must be a scalar or a numpy array')
if not isinstance(gaussian_dnu_FWHM, units.Quantity):
gaussian_dnu_FWHM = NP.asarray(gaussian_dnu_FWHM).reshape(-1) * units.Hertz
else:
gaussian_dnu_FWHM = units.Quantity(NP.asarray(gaussian_dnu_FWHM.value).reshape(-1), gaussian_dnu_FWHM.unit)
if NP.any(gaussian_dnu_FWHM <= 0.0*units.Hertz):
raise ValueError('Input gaussian_dnu_FWHM must be positive')
if not isinstance(lorentzian_dnu_FWHM, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input lorentzian_dnu_FWHM must be a scalar or a numpy array')
if not isinstance(lorentzian_dnu_FWHM, units.Quantity):
lorentzian_dnu_FWHM = NP.asarray(lorentzian_dnu_FWHM).reshape(-1) * units.Hertz
else:
lorentzian_dnu_FWHM = units.Quantity(NP.asarray(lorentzian_dnu_FWHM.value).reshape(-1), lorentzian_dnu_FWHM.unit)
if NP.any(lorentzian_dnu_FWHM <= 0.0*units.Hertz):
raise ValueError('Input lorentzian_dnu_FWHM must be positive')
if not isinstance(nu_0, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input nu_0 must be a scalar or a numpy array')
if not isinstance(nu_0, units.Quantity):
nu_0 = NP.asarray(nu_0).reshape(-1) * units.Hertz
else:
nu_0 = units.Quantity(NP.asarray(nu_0.value).reshape(-1), nu_0.unit)
if NP.any(nu_0 <= 0.0*units.Hertz):
raise ValueError('Input nu_0 must be positive')
if gaussian_dnu_FWHM.size != nu_0.size:
if (gaussian_dnu_FWHM.size != 1) and (nu_0.size != 1):
raise ValueError('Input gaussian_dnu_FWHM must contain one or same number of elements as input nu_0')
if lorentzian_dnu_FWHM.size != nu_0.size:
if (lorentzian_dnu_FWHM.size != 1) and (nu_0.size != 1):
raise ValueError('Input lorentzian_dnu_FWHM must contain one or same number of elements as input nu_0')
if nu is None:
nu = nu_0
else:
if not isinstance(nu, (int,float,NP.ndarray,units.Quantity)):
raise TypeError('Input nu must be a scalar or a numpy array')
if not isinstance(nu, units.Quantity):
nu = NP.asarray(nu).reshape(-1) * units.Hertz
else:
nu = units.Quantity(NP.asarray(nu.value).reshape(-1), nu.unit)
if NP.any(nu <= 0.0*units.Hertz):
raise ValueError('Input nu must be positive')
if nu.size != nu_0.size:
if (nu.size != 1) and (nu_0.size != 1):
raise ValueError('Input nu must contain one or same number of elements as input nu_0')
gamma = 0.5 * lorentzian_dnu_FWHM
sigma = gaussian_dnu_FWHM / (2.0 * NP.sqrt(2.0 * NP.log(2.0)))
x = nu - nu_0
z = (x + 1j * gamma) / (sigma * NP.sqrt(2.0))
line_profile = SPS.wofz(z.decompose().value).real / (sigma * NP.sqrt(2.0 * NP.pi))
return line_profile.decompose()
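# Note (added): wofz evaluates the Faddeeva function w(z), whose real part scaled by
# 1/(sigma*sqrt(2*pi)) is the Voigt kernel; in the limit lorentzian_dnu_FWHM -> 0 the
# profile tends to the pure Gaussian, and for gaussian_dnu_FWHM -> 0 it tends to the
# Lorentzian profile above.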
###############################################################################
def voight_FWHM(gaussian_FWHM, lorentzian_FWHM):
"""
---------------------------------------------------------------------------
Estimate the FWHM of the Voigt line profile given the FWHMs of the Gaussian and
Lorentzian profiles
# Reference: <NAME>. & <NAME> (February 1977)
# Reference: Wikipedia article on the Voigt profile
---------------------------------------------------------------------------
"""
# Minimal completion (the original body was truncated here): empirical approximation
# from the 1977 reference above, accurate to about 0.02%:
#   FWHM_V ~= 0.5346 * FWHM_L + sqrt(0.2166 * FWHM_L**2 + FWHM_G**2)
return 0.5346 * lorentzian_FWHM + NP.sqrt(0.2166 * lorentzian_FWHM**2 + gaussian_FWHM**2)
from rdflib import Graph, plugin
from rdflib.parser import Parser
from rdflib.serializer import Serializer
import sys
import os
import argparse
import fnmatch
# rdflib is the parsing library used here
# TODO: try to integrate this as a web service
plugin.register("rdf-json", Parser, "rdflib_rdfjson.rdfjson_parser", "RdfJsonParser")
plugin.register("rdf-json", Serializer, "rdflib_rdfjson.rdfjson_serializer", "RdfJsonSerializer")
INPUT_FORMAT_TO_EXTENSIONS = {"application/rdf+xml": [".xml", ".rdf", ".owl"],
"text/html": [".html"],
"xml": [".xml", ".rdf", ".owl"],
"rdf-json": [".json"],
"json-ld": [".jsonld", ".json-ld"],
"ttl": [".ttl"],
"nt": [".nt"],
"nquads": [".nq"],
"trix": [".xml", ".trix"],
"rdfa": [".xhtml", ".html"],
"n3": [".n3"]}
OUTPUT_FORMAT_TO_EXTENSION = {"xml": ".xml",
"pretty-xml": ".xml",
"rdf-json": ".json",
"json-ld": ".jsonld",
"nt": ".nt",
"nquads": ".nq",
"trix": ".xml",
"ttl": ".ttl",
"n3": ".n3"}
# a function that returns the script description as a string
def description():
return """
Convert one RDF serialization into another.
This script allows you to convert several files at once. It can
convert individual files or even whole directory trees at once
(with or without preserving the directory tree structure).
"""
# a function that returns additional help as a string
def epilog():
s = "Default extensions for INPUT format:\n"
for inputFormat, extensions in INPUT_FORMAT_TO_EXTENSIONS.items():
s += " - %s : %s\n" % (inputFormat.ljust(19), extensions)
s += "\n"
s += "Default extension for OUTPUT format:\n"
for outputFormat, extension in OUTPUT_FORMAT_TO_EXTENSION.items():
s += " - %s : '%s'\n" % (outputFormat.ljust(10), extension)
return s
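# Example invocation (hypothetical script/file names):
#   python rdfconvert.py --from xml --to ttl -R -o converted/ ontologies/
# recursively finds .xml/.rdf/.owl files under ontologies/ and writes .ttl files into
# converted/, mirroring the input directory tree unless -n/--no-tree is given.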
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=description(),
epilog=epilog())
parser.add_argument("INPUT",
metavar="INPUT",
type=str,
nargs="+",
help="A list of input files or input directories. When *files* are " \
"specified, they will be parsed and converted regardless of their " \
"extension. But when *directories* are specified, the script will " \
"try to find files inside these directories that match certain " \
"extensions (either all default extensions for the input format as " \
"specified by the --from flag, or the custom extension(s) as " \
" specified directly by the --from-ext flag). Input directories may " \
" be searched recursively via the -R flag.")
parser.add_argument("--from",
dest="FROM",
action="store",
required=True,
choices=INPUT_FORMAT_TO_EXTENSIONS.keys(),
help="The serialization format of the input files to convert.")
parser.add_argument("--from-ext",
dest="FROM_EXT",
action="store",
nargs="+",
default=None,
help="The file extensions to match when browsing input directories " \
"(could be .owl, .xml, .n3, .jsonld, .rdf, ...). You only have to " \
"provide this flag if you're unhappy with the default extensions " \
"for the given input format. You can view these default extensions " \
"at the end of this help.")
parser.add_argument("-R", "--recursive",
dest="recursive",
action="store_const",
const=True,
default=False,
help="When input directories are given, browse them recursively to find " \
"and convert files.")
parser.add_argument("-o",
dest="OUTPUTDIR",
action="store",
nargs="?",
default=None,
help="The directory to write the output files " \
"(omit this flag to print the output to the stdout).")
parser.add_argument("--to",
dest="TO",
action="store",
required=True,
choices=OUTPUT_FORMAT_TO_EXTENSION.keys(),
help="The serialization format of the output.")
parser.add_argument("--to-ext",
dest="TO_EXT",
action="store",
default=None,
help="The file extension of the output files that will be created " \
"(could be .owl, .xml, .n3, .jsonld, .rdf, ...). You only have to " \
"provide this flag if you're unhappy with the default extension for " \
"the given output format. You can view these default extensions " \
"at the end of this help. When the -o flag is not " \
"specified, the output will be written the the stdout instead of " \
"files, so the --to-ext flag will have no effect. Don't forget to " \
"add a dot (.) in front of the extension name (so provide .foo " \
"instead of foo).")
parser.add_argument("-f", "--force",
dest="force",
action="store_const",
const=True,
default=False,
help="Always overwrite existing output files, instead of prompting.")
parser.add_argument("-n", "--no-tree",
dest="no_tree",
action="store_const",
const=True,
default=False,
help="When given in combination with -R (recursive input file matching), " \
"all output files will be written in the same \"flat\" directory. " \
"Without this -n flag, the same directory structure of the input " \
"directory will be created (if necessary) and the output files will " \
"be written to the corresponding directories of where they were " \
"found in the input directories. Only those output directories will " \
"be created for the input directories that contain at least one " \
"matching input file. If you specify this flag, all output files " \
"will be stored in the same directory and you may run into filename " \
"collisions!")
parser.add_argument("-s", "--simulate",
dest="simulate",
action="store_const",
const=True,
default=False,
help="Do not write any output files, but just print a message for each "
"file that they *would* be written without the -s flag.")
parser.add_argument("-v", "--verbose",
dest="verbose",
action="store_const",
const=True,
default=False,
help="Verbosely print some debugging info.")
args = parser.parse_args()
# a simple function to log verbose info
def VERBOSE(msg):
if args.verbose:
print(msg)
# process each input file sequentially:
for inputFileOrDir in args.INPUT:
VERBOSE("Now processing input file or directory '%s'" % inputFileOrDir)
# check if the file exists, and if it's a directory or a file
isdir = False
if os.path.exists(inputFileOrDir):
if os.path.isdir(inputFileOrDir):
VERBOSE(" - '%s' exists and is a directory" % inputFileOrDir)
inputFileOrDir = os.path.abspath(inputFileOrDir)
isdir = True
else:
VERBOSE(" - '%s' exists and is a file" % inputFileOrDir)
else:
sys.exit("!!! ERROR: Input file '%s' was not found !!!" % inputFileOrDir)
VERBOSE(" - Input format: %s" % args.FROM)
VERBOSE(" - Output format: %s" % args.TO)
# find out which extensions we should match
if args.FROM_EXT:
inputExtensions = args.FROM_EXT
else:
inputExtensions = INPUT_FORMAT_TO_EXTENSIONS[args.FROM]
VERBOSE(" - Input extensions: %s" % inputExtensions)
# find out which output extension we should write
if args.TO_EXT:
outputExtension = args.TO_EXT
else:
outputExtension = OUTPUT_FORMAT_TO_EXTENSION[args.TO]
VERBOSE(" - Output extension: '%s'" % outputExtension)
inputFiles = []
if isdir:
VERBOSE(" - Now walking the directory (recursive = %s):" % args.recursive)
for root, dirnames, filenames in os.walk(inputFileOrDir):
VERBOSE(" * Finding files in '%s'" % root)
for extension in inputExtensions:
for filename in fnmatch.filter(filenames, "*%s" % extension):
VERBOSE(" -> found '%s'" % filename)
inputFiles.append(os.path.join(root, filename))
if not args.recursive:
break
else:
inputFiles.append(inputFileOrDir)
# create the graph, and parse the input files
for inputFile in inputFiles:
g = Graph()
g.parse(inputFile, format=args.FROM)
VERBOSE(" - the graph was parsed successfully")
# if no output directory is specified, just print the output to the stdout
if args.OUTPUTDIR is None:
output = g.serialize(None, format=args.TO)
VERBOSE(" - output:")
print(output)
# if an output directory was provided, but it doesn't exist, then exit the script
elif not os.path.exists(args.OUTPUTDIR):
sys.exit("!!! ERROR: Output dir '%s' was not found !!!" % args.OUTPUTDIR)
# if the output directory was given and it exists, then figure out the output filename
# and write the output to disk
else:
head, tail = os.path.split(inputFile)
VERBOSE(" - head, tail: %s, %s" % (head, tail))
if args.no_tree:
outputAbsPath = os.path.abspath(args.OUTPUTDIR)
else:
# remove the common prefix from the head and the input directory
# (otherwise the given input path will also be added to the output path)
commonPrefix = os.path.commonprefix([head, inputFileOrDir])
VERBOSE(" - inputFileOrDir: %s" % inputFileOrDir)
VERBOSE(" - common prefix: %s" % commonPrefix)
headWithoutCommonPrefix = head[len(commonPrefix) + 1:]
VERBOSE(" - head without common prefix: %s" % headWithoutCommonPrefix)
outputAbsPath = os.path.join(os.path.abspath(args.OUTPUTDIR),
headWithoutCommonPrefix)
VERBOSE(" - output absolute path: %s" % outputAbsPath)
outputFileName = os.path.splitext(tail)[0] + outputExtension
outputAbsFileName = os.path.join(outputAbsPath, outputFileName)
VERBOSE(" - output filename: '%s'" % outputAbsFileName)
# for safety, check that we're not overwriting the input file
if outputAbsFileName == os.path.abspath(inputFile):
sys.exit("!!! ERROR: Input file '%s' is the same as output file !!!" \
% outputAbsFileName)
else:
VERBOSE(" - this file is different from the input filename")
# if the output file exists already and the "force" flag is not set,
# then ask for permission to overwrite the file
skipThisFile = False
if not args.force and os.path.exists(outputAbsFileName):
yesOrNo = input("Overwrite %s? (y/n): " % outputAbsFileName)
if yesOrNo.lower() not in ["y", "yes"]:
skipThisFile = True
if skipThisFile:
VERBOSE(" - this file will be skipped")
else:
dirName = os.path.dirname(outputAbsFileName)
if not os.path.exists(dirName):
if args.simulate:
print("Simulation: this directory tree would be written: %s" % dirName)
else:
VERBOSE(" - Now creating %s since it does not exist yet" % dirName)
os.makedirs(dirName)
if args.simulate:
print("Simulation: this file would be written: %s" % | |
# @var sql: str -- The sql statement
# @var err: str -- The error message for SQLite
# @var conn: object -- The custom database connection for Postgres and MySQL
# @var cur: object -- The custom connection cursor for Postgres and MySQL
#
# @return bool
##
def _create_database(self, database:str=None):
# Check the development
if not self.development:
alert = '''----------------------------------------------------------\n'''
alert += '''INFO!\n'''
alert += '''This method is only available in development!\n'''
alert += '''----------------------------------------------------------'''
# Alert the user
print(alert)
# Exit the program
exit()
# Check the required params
if not database:
# Developer mode
if self.debug:
# Raise error
raise Exception("You must provide the required parameters: ['database']")
# Production mode
else:
print("You must provide the required parameters: ['database']")
return False
# Database already exists
if self._exist_database(database=database):
# Developer mode
if self.debug:
# Raise error
raise Exception(f'Database "{database}" already exists!')
# Production mode
else:
print(f'Database "{database}" already exists!')
return False
# Database not exists
else:
# SQLite
if self.db_system == 'SQLite':
# Check database file name and extension
if not check_file(database, '.db') and not check_file(database, '.sqlite') and not check_file(database, '.sqlite3'):
# Prepare error message
err = '''Database file name and/or extension is invalid!\n'''
err += '''Valid Names: A-Z, a-z, _\n'''
err += '''Valid Extensions: .sqlite3, .sqlite, .db'''
# Check debug mode
if self.debug:
# Raise error
raise Exception(err)
# Return the result
else:
return False
# Everything is fine
else:
# Attempt the process
try:
# Create the database
self.conn = DatabaseAPI.connect(database)
# Return the result
return True
# Handle the errors
except NameError:
# Developer mode
if self.debug:
# Raise error
raise Exception('Cannot create the database!')
# Production mode
else:
print('Cannot create the database!')
return False
# MySQL
elif self.db_system == 'MySQL':
# Create a database connection
conn = DatabaseAPI.connect(
host=self.host,
user=self.user,
password=self.password,
)
# Attempt the process
try:
# Prepare sql
sql = f'''CREATE DATABASE `{database}`;'''
# Create the connection cursor
cur = conn.cursor()
# Execute the sql
cur.execute(sql)
# Close the current connection
conn.close()
# Return the result
return True
# Handle the errors
except NameError as err:
# Developer mode
if self.debug:
# Raise error
raise Exception(err)
# Production mode
else:
print(err)
return False
# Postgres
elif self.db_system == 'Postgres':
# Close the global connection
self.conn.close()
# Create a database connection
conn = DatabaseAPI.connect(
host=self.host,
port=self.port,
user=self.user,
password=<PASSWORD>,
)
# Attempt the process
try:
# Prepare sql
sql = f'''CREATE DATABASE "{database}";'''
# Set the transaction to autocommit
conn.autocommit = True
# Create the connection cursor
cur = conn.cursor()
# Execute the sql
cur.execute(sql)
# Close the current connection
conn.close()
# Return the result
return True
# Handle the errors
except NameError as err:
# Developer mode
if self.debug:
# Raise error
raise Exception(err)
# Production mode
else:
print(err)
return False
################
# Read Methods #
################
##
# @desc Selects rows
#
# @param table: str -- *Required Table name (ex. "users")
# @param cols: list -- Optional Columns (ex. ["id", "first_name", "last_name"])
# @param where: dict -- Optional WHERE statement (ex. {"id": "2", "username": "admin"})
# @param order_by: dict -- Optional ORDER BY statement (ex. {"id": "ASC", "date": "DESC"})
# @param group_by: str -- Optional GROUP BY statement (ex. 'country')
# @param limit: int -- Optional LIMIT statement (ex. "10")
# @param offset: int -- Optional OFFSET statement (ex. "10")
#
# @var sql: str -- The sql statement
# @var data_bind: list -- Data binding against SQL Injection
# @var where_sql: list -- A placeholder for the WHERE clause
# @var order_by_sql: list -- A placeholder for the ORDER BY clause
# @var in_bind: list -- A placeholder IN operator
# @var in_sql: str -- The sql statement for IN operator
#
# @return class: type
##
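# Example usage (illustrative sketch, assuming `db` is an instance of this class):
#   db.read(table='users',
#           cols=['id', 'username'],
#           where={'age--gt': 18, 'country--in': ['US', 'CA'], 'username--like': 'a%'},
#           order_by={'id': 'ASC'},
#           limit=10)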
def read(self, table:str, cols:list=[], where:dict={}, order_by:dict={}, group_by:str=None, limit:int=None, offset:int=None):
# Check required params
if not table:
# Developer mode
if self.debug:
# Raise error
raise Exception("You must provide the required parameters: ['table']")
# Production mode
else:
print("You must provide the required parameters: ['table']")
return False
# The default variables
data_col = []
data_bind = []
where_sql = []
order_by_sql = []
# Check cols
if not cols or cols == ['*'] or cols == ["*"]:
cols = '*'
else:
# SQLite
if self.db_system == 'SQLite':
for col in cols:
data_col.append(f'"{col}"')
# data_col.append(f"'{col}'")
cols = ', '.join(data_col)
# MySQL
elif self.db_system == 'MySQL':
for col in cols:
data_col.append(f'`{col}`')
cols = ', '.join(data_col)
# Postgres
elif self.db_system == 'Postgres':
for col in cols:
data_col.append(f'"{col}"')
cols = ', '.join(data_col)
# Check where
if where:
for key, value in where.items():
in_bind = []
# Remove the Ineffective characters (#)
key = delete_chars(key, '#')
# Check patterns
# Equal to (strict)
if re.search('--equal$', key) or re.search('--e$', key):
key = key.replace('--equal', '')
key = key.replace('--e', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}"={self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}`={self.sp_char}')
data_bind.append(value)
# Not equal to
elif re.search('--not-equal$', key) or re.search('--ne$', key):
key = key.replace('--not-equal', '')
key = key.replace('--ne', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}"<>{self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}`<>{self.sp_char}')
data_bind.append(value)
# Greater than
elif re.search('--greater-than$', key) or re.search('--gt$', key):
key = key.replace('--greater-than', '')
key = key.replace('--gt', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}">{self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}`>{self.sp_char}')
data_bind.append(value)
# Greater than or equal to
elif re.search('--greater-equal$', key) or re.search('--ge$', key):
key = key.replace('--greater-equal', '')
key = key.replace('--ge', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}">={self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}`>={self.sp_char}')
data_bind.append(value)
# Less than
elif re.search('--less-than$', key) or re.search('--lt$', key):
key = key.replace('--less-than', '')
key = key.replace('--lt', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}"<{self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}`<{self.sp_char}')
data_bind.append(value)
# Less than or equal to
elif re.search('--less-equal$', key) or re.search('--le$', key):
key = key.replace('--less-equal', '')
key = key.replace('--le', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}"<={self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}`<={self.sp_char}')
data_bind.append(value)
# LIKE
elif re.search('--like$', key) or re.search('--l$', key):
key = key.replace('--like', '')
key = key.replace('--l', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}" LIKE {self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}` LIKE {self.sp_char}')
data_bind.append(value)
# NOT LIKE
elif re.search('--not-like$', key) or re.search('--nl$', key):
key = key.replace('--not-like', '')
key = key.replace('--nl', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}" NOT LIKE {self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}` NOT LIKE {self.sp_char}')
data_bind.append(value)
# BETWEEN
elif re.search('--between$', key) or re.search('--b$', key):
key = key.replace('--between', '')
key = key.replace('--b', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}" BETWEEN {self.sp_char} AND {self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}` BETWEEN {self.sp_char} AND {self.sp_char}')
data_bind.append(value[0])
data_bind.append(value[1])
# NOT BETWEEN
elif re.search('--not-between$', key) or re.search('--nb$', key):
key = key.replace('--not-between', '')
key = key.replace('--nb', '')
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}" NOT BETWEEN {self.sp_char} AND {self.sp_char}')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}` NOT BETWEEN {self.sp_char} AND {self.sp_char}')
data_bind.append(value[0])
data_bind.append(value[1])
# IN
elif re.search('--in$', key) or re.search('--i$', key):
key = key.replace('--in', '')
key = key.replace('--i', '')
for x in value:
in_bind.append(self.sp_char)
data_bind.append(x)
in_sql = ','.join(in_bind)
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system == 'Postgres':
where_sql.append(f'"{key}" IN ({in_sql})')
# MySQL
elif self.db_system == 'MySQL':
where_sql.append(f'`{key}` IN ({in_sql})')
# NOT IN
elif re.search('--not-in$', key) or re.search('--ni$', key):
key = key.replace('--not-in', '')
key = key.replace('--ni', '')
for x in value:
in_bind.append(self.sp_char)
data_bind.append(x)
in_sql = ','.join(in_bind)
# SQLite and Postgres
if self.db_system == 'SQLite' or self.db_system
# Source repository: hmtrii/tirg
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for training and testing."""
import numpy as np
import PIL
import skimage.io
import torch
import json
import torch.utils.data
import torchvision
import warnings
import random
class BaseDataset(torch.utils.data.Dataset):
"""Base class for a dataset."""
def __init__(self):
super(BaseDataset, self).__init__()
self.imgs = []
self.test_queries = []
def get_loader(self,
batch_size,
shuffle=False,
drop_last=False,
num_workers=0):
return torch.utils.data.DataLoader(
self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
drop_last=drop_last,
collate_fn=lambda i: i)
def get_test_queries(self):
return self.test_queries
def get_all_texts(self):
raise NotImplementedError
def __getitem__(self, idx):
return self.generate_random_query_target()
def generate_random_query_target(self):
raise NotImplementedError
def get_img(self, idx, raw_img=False):
raise NotImplementedError
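# Illustrative sketch (not part of the original code): a minimal subclass only
# needs the hooks below; get_loader and __getitem__ are inherited from
# BaseDataset, and __len__ must also be provided for a real DataLoader.
#
#   class ToyDataset(BaseDataset):
#       def __len__(self):
#           return 100
#       def get_all_texts(self):
#           return ['dummy caption']
#       def generate_random_query_target(self):
#           return {'source_img_data': torch.zeros(3, 8, 8),
#                   'target_img_data': torch.zeros(3, 8, 8),
#                   'mod': {'str': 'dummy caption'}}
#       def get_img(self, idx, raw_img=False):
#           return torch.zeros(3, 8, 8)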
class CSSDataset(BaseDataset):
"""CSS dataset."""
def __init__(self, path, split='train', transform=None):
super(CSSDataset, self).__init__()
self.img_path = path + '/images/'
self.transform = transform
self.split = split
self.data = np.load(path + '/css_toy_dataset_novel2_small.dup.npy').item()
self.mods = self.data[self.split]['mods']
self.imgs = []
for objects in self.data[self.split]['objects_img']:
label = len(self.imgs)
# if self.data[self.split].has_key('labels'):
if 'labels' in self.data[self.split]:
label = self.data[self.split]['labels'][label]
self.imgs += [{
'objects': objects,
'label': label,
'captions': [str(label)]
}]
self.imgid2modtarget = {}
for i in range(len(self.imgs)):
self.imgid2modtarget[i] = []
for i, mod in enumerate(self.mods):
for k in range(len(mod['from'])):
f = mod['from'][k]
t = mod['to'][k]
self.imgid2modtarget[f] += [(i, t)]
self.generate_test_queries_()
def generate_test_queries_(self):
test_queries = []
for mod in self.mods:
for i, j in zip(mod['from'], mod['to']):
test_queries += [{
'source_img_id': i,
'target_caption': self.imgs[j]['captions'][0],
'mod': {
'str': mod['to_str']
}
}]
self.test_queries = test_queries
def get_1st_training_query(self):
i = np.random.randint(0, len(self.mods))
mod = self.mods[i]
j = np.random.randint(0, len(mod['from']))
self.last_from = mod['from'][j]
self.last_mod = [i]
return mod['from'][j], i, mod['to'][j]
def get_2nd_training_query(self):
modid, new_to = random.choice(self.imgid2modtarget[self.last_from])
while modid in self.last_mod:
modid, new_to = random.choice(self.imgid2modtarget[self.last_from])
self.last_mod += [modid]
# mod = self.mods[modid]
return self.last_from, modid, new_to
def generate_random_query_target(self):
try:
if len(self.last_mod) < 2:
img1id, modid, img2id = self.get_2nd_training_query()
else:
img1id, modid, img2id = self.get_1st_training_query()
except Exception:
img1id, modid, img2id = self.get_1st_training_query()
out = {}
out['source_img_id'] = img1id
out['source_img_data'] = self.get_img(img1id)
out['target_img_id'] = img2id
out['target_img_data'] = self.get_img(img2id)
out['mod'] = {'id': modid, 'str': self.mods[modid]['to_str']}
return out
def __len__(self):
return len(self.imgs)
def get_all_texts(self):
return [mod['to_str'] for mod in self.mods]
def get_img(self, idx, raw_img=False, get_2d=False):
"""Gets CSS images."""
def generate_2d_image(objects):
img = np.ones((64, 64, 3))
colortext2values = {
'gray': [87, 87, 87],
'red': [244, 35, 35],
'blue': [42, 75, 215],
'green': [29, 205, 20],
'brown': [129, 74, 25],
'purple': [129, 38, 192],
'cyan': [41, 208, 208],
'yellow': [255, 238, 51]
}
for obj in objects:
s = 4.0
if obj['size'] == 'large':
s *= 2
c = [0, 0, 0]
for j in range(3):
c[j] = 1.0 * colortext2values[obj['color']][j] / 255.0
y = obj['pos'][0] * img.shape[0]
x = obj['pos'][1] * img.shape[1]
if obj['shape'] == 'rectangle':
img[int(y - s):int(y + s), int(x - s):int(x + s), :] = c
if obj['shape'] == 'circle':
for y0 in range(int(y - s), int(y + s) + 1):
x0 = x + (abs(y0 - y) - s)
x1 = 2 * x - x0
img[y0, int(x0):int(x1), :] = c
if obj['shape'] == 'triangle':
for y0 in range(int(y - s), int(y + s)):
x0 = x + (y0 - y + s) / 2
x1 = 2 * x - x0
x0, x1 = min(x0, x1), max(x0, x1)
img[y0, int(x0):int(x1), :] = c
return img
if self.img_path is None or get_2d:
img = generate_2d_image(self.imgs[idx]['objects'])
else:
img_path = self.img_path + ('/css_%s_%06d.png' % (self.split, int(idx)))
with open(img_path, 'rb') as f:
img = PIL.Image.open(f)
img = img.convert('RGB')
if raw_img:
return img
if self.transform:
img = self.transform(img)
return img
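# Usage sketch (illustrative; the dataset directory is an assumption, not part
# of the original source):
#
#   transform = torchvision.transforms.ToTensor()
#   trainset = CSSDataset('/path/to/CSS', split='train', transform=transform)
#   loader = trainset.get_loader(batch_size=32, shuffle=True)
#   for batch in loader:
#       # collate_fn is the identity, so `batch` is a list of dicts with
#       # 'source_img_data', 'target_img_data' and 'mod' entries
#       mods = [d['mod']['str'] for d in batch]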
class Fashion200k(BaseDataset):
"""Fashion200k dataset."""
def __init__(self, path, split='train', transform=None):
super(Fashion200k, self).__init__()
self.split = split
self.transform = transform
self.img_path = path + '/'
# get label files for the split
label_path = path + '/labels/'
from os import listdir
from os.path import isfile
from os.path import join
label_files = [
f for f in listdir(label_path) if isfile(join(label_path, f))
]
label_files = [f for f in label_files if split in f]
# read image info from label files
self.imgs = []
def caption_post_process(s):
return s.strip().replace('.',
'dotmark').replace('?', 'questionmark').replace(
'&', 'andmark').replace('*', 'starmark')
for filename in label_files:
print('read ' + filename)
with open(label_path + '/' + filename) as f:
lines = f.readlines()
for line in lines:
line = line.split(' ')
img = {
'file_path': line[0],
'detection_score': line[1],
'captions': [caption_post_process(line[2])],
'split': split,
'modifiable': False
}
self.imgs += [img]
print('Fashion200k:', len(self.imgs), 'images')
# generate query for training or testing
if split == 'train':
self.caption_index_init_()
else:
self.generate_test_queries_()
def get_different_word(self, source_caption, target_caption):
source_words = source_caption.split()
target_words = target_caption.split()
for source_word in source_words:
if source_word not in target_words:
break
for target_word in target_words:
if target_word not in source_words:
break
mod_str = 'replace ' + source_word + ' with ' + target_word
return source_word, target_word, mod_str
def generate_test_queries_(self):
file2imgid = {}
for i, img in enumerate(self.imgs):
file2imgid[img['file_path']] = i
with open(self.img_path + '/test_queries.txt') as f:
lines = f.readlines()
self.test_queries = []
for line in lines:
source_file, target_file = line.split()
idx = file2imgid[source_file]
target_idx = file2imgid[target_file]
source_caption = self.imgs[idx]['captions'][0]
target_caption = self.imgs[target_idx]['captions'][0]
source_word, target_word, mod_str = self.get_different_word(
source_caption, target_caption)
self.test_queries += [{
'source_img_id': idx,
'target_img_id': target_idx,
'source_caption': source_caption,
'target_caption': target_caption,
'mod': {
'str': mod_str
}
}]
def caption_index_init_(self):
""" index caption to generate training query-target example on the fly later"""
# index caption 2 caption_id and caption 2 image_ids
caption2id = {}
id2caption = {}
caption2imgids = {}
for i, img in enumerate(self.imgs):
for c in img['captions']:
# if not caption2id.has_key(c):
if c not in caption2id:
id2caption[len(caption2id)] = c
caption2id[c] = len(caption2id)
caption2imgids[c] = []
caption2imgids[c].append(i)
self.caption2imgids = caption2imgids
print(len(caption2imgids), 'unique captions')
# parent captions are 1-word shorter than their children
parent2children_captions = {}
for c in caption2id.keys():
for w in c.split():
p = c.replace(w, '')
p = p.replace(' ', ' ').strip()
# if not parent2children_captions.has_key(p):
if p not in parent2children_captions:
parent2children_captions[p] = []
if c not in parent2children_captions[p]:
parent2children_captions[p].append(c)
self.parent2children_captions = parent2children_captions
# identify parent captions for each image
for img in self.imgs:
img['modifiable'] = False
img['parent_captions'] = []
for p in parent2children_captions:
if len(parent2children_captions[p]) >= 2:
for c in parent2children_captions[p]:
for imgid in caption2imgids[c]:
self.imgs[imgid]['modifiable'] = True
self.imgs[imgid]['parent_captions'] += [p]
num_modifiable_imgs = 0
for img in self.imgs:
if img['modifiable']:
num_modifiable_imgs += 1
print('Modifiable images', num_modifiable_imgs)
def caption_index_sample_(self, idx):
while not self.imgs[idx]['modifiable']:
idx = np.random.randint(0, len(self.imgs))
# find random target image (same parent)
img = self.imgs[idx]
while True:
p = random.choice(img['parent_captions'])
c = random.choice(self.parent2children_captions[p])
if c not in img['captions']:
break
target_idx = random.choice(self.caption2imgids[c])
# find the word difference between query and target (not in parent caption)
source_caption = self.imgs[idx]['captions'][0]
target_caption = self.imgs[target_idx]['captions'][0]
source_word, target_word, mod_str = self.get_different_word(
source_caption, target_caption)
return idx, target_idx, source_word, target_word, mod_str
def get_all_texts(self):
texts = []
for img in self.imgs:
for c in img['captions']:
texts.append(c)
return texts
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
idx, target_idx, source_word, target_word, mod_str = self.caption_index_sample_(
idx)
out = {}
out['source_img_id'] = idx
out['source_img_data'] = self.get_img(idx)
out['source_caption'] = self.imgs[idx]['captions'][0]
out['target_img_id'] = target_idx
out['target_img_data'] = self.get_img(target_idx)
out['target_caption'] = self.imgs[target_idx]['captions'][0]
out['mod'] = {'str': mod_str}
return out
def get_img(self, idx, raw_img=False):
img_path = self.img_path + self.imgs[idx]['file_path']
with open(img_path, 'rb') as f:
img = PIL.Image.open(f)
img = img.convert('RGB')
if raw_img:
return img
if self.transform:
img = self.transform(img)
return img
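# Usage sketch (illustrative; the dataset directory is an assumption): each
# training sample pairs a source image with a target image whose caption
# differs by a single word, plus the 'replace X with Y' modifier string.
#
#   trainset = Fashion200k('/path/to/fashion200k', split='train',
#                          transform=torchvision.transforms.ToTensor())
#   sample = trainset[0]
#   print(sample['source_caption'], '->', sample['target_caption'])
#   print(sample['mod']['str'])   # e.g. "replace red with blue"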
class MITStates(BaseDataset):
"""MITStates dataset."""
def __init__(self, path, split='train', transform=None):
super(MITStates, self).__init__()
self.path = path
self.transform = transform
self.split = split
self.imgs = []
test_nouns = [
u'armor', u'bracelet', u'bush', u'camera', u'candy', u'castle',
u'ceramic', u'cheese', u'clock', u'clothes', u'coffee', u'fan', u'fig',
u'fish', u'foam', u'forest', u'fruit', u'furniture', u'garden', u'gate',
u'glass', u'horse', u'island', u'laptop', u'lead', u'lightning',
u'mirror', u'orange', u'paint', u'persimmon', u'plastic', u'plate',
u'potato', u'road', u'rubber', u'sand', u'shell', u'sky', u'smoke',
u'steel', u'stream', u'table', u'tea', u'tomato', u'vacuum', u'wax',
u'wheel', u'window', u'wool'
]
"SR": ("Structured Report Document", [32, 33]),
},
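# (Added note, not part of the generated file.) Each entry below maps a DICOM
# keyword to a dict of {code value: (code meaning, [context group IDs in which
# the concept appears])}; for example, assuming this dict is bound to a name
# such as `concepts`, concepts['StudentTTest']['113068'] would yield
# ("Student's T-test", [218, 7180, 7469]).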
"StudentTTest": {
"113068": ("Student's T-test", [218, 7180, 7469]),
},
"StudiesImported": {
"110027": ("Studies Imported", [7008]),
},
"Study": {
"113014": ("Study", [7012, 10000]),
},
"StudyDate": {
"111060": ("Study Date", []),
},
"StudyInstanceUID": {
"110180": ("Study Instance UID", [404, 10001]),
},
"StudyPerformedDidNotMatchRequest": {
"130573": ("Study performed did not match request", [6314]),
},
"StudyTime": {
"111061": ("Study Time", []),
},
"SubjectAge": {
"121033": ("Subject Age", []),
},
"SubjectBirthDate": {
"121031": ("Subject Birth Date", []),
},
"SubjectBreed": {
"121035": ("Subject Breed", []),
},
"SubjectClass": {
"121024": ("Subject Class", []),
},
"SubjectID": {
"121030": ("Subject ID", []),
},
"SubjectName": {
"121029": ("Subject Name", []),
},
"SubjectSex": {
"121032": ("Subject Sex", []),
},
"SubjectSpecies": {
"121034": ("Subject Species", []),
},
"SubjectTimePointIdentifier": {
"126070": ("Subject Time Point Identifier", []),
},
"SubjectUID": {
"121028": ("Subject UID", []),
},
"SubjectiveRefraction": {
"SRF": ("Subjective Refraction", [29, 30, 33]),
},
"SuboptimalContrastTiming": {
"128554": ("Suboptimal contrast timing", [10034]),
},
"SuboptimalPatientPreparation": {
"130575": ("Suboptimal patient preparation", [6314]),
},
"Subpleural": {
"112153": ("Subpleural", [6124, 6126]),
},
"SubpleuralLine": {
"112115": ("Subpleural line", [6102, 6103]),
},
"SubscapularFossa": {
"112098": ("Subscapular Fossa", [6115, 8134]),
},
"SubstanceUseHistory": {
"111545": ("Substance Use History", []),
},
"SubtractionImageMissing": {
"130581": ("Subtraction image missing", [6315, 6317]),
},
"Succeeded": {
"111222": ("Succeeded", [6042]),
},
"SuccessfulAnalyses": {
"111062": ("Successful Analyses", []),
},
"SuccessfulDetections": {
"111063": ("Successful Detections", []),
},
"SuggestiveOfMalignancyTakeAppropriateAction": {
"111146": ("Suggestive of malignancy - take appropriate action", [6028, 6029]),
},
"SumAverageOfGLCM": {
"128787": ("Sum Average of GLCM", []),
},
"SumEntropyOfGLCM": {
"128789": ("Sum Entropy of GLCM", []),
},
"SumOfSegmentedVoxelMethodForVolume": {
"126030": ("Sum of segmented voxel method for volume", [7474]),
},
"SumVarianceOfGLCM": {
"128788": ("Sum Variance of GLCM", []),
},
"Summary": {
"121111": ("Summary", []),
},
"SummaryOfAnalyses": {
"111065": ("Summary of Analyses", []),
},
"SummaryOfDetections": {
"111064": ("Summary of Detections", []),
},
"SuperficialRetinaStructuralReflectanceMap": {
"128266": ("Superficial retina structural reflectance map", [4271]),
},
"SuperficialRetinaVasculatureFlow": {
"128265": ("Superficial retina vasculature flow", [4271]),
},
"SuperiorLongitudinalFasciculusI": {
"110703": ("superior longitudinal fasciculus I", [7703, 7710, 8134]),
},
"SuperiorLongitudinalFasciculusII": {
"110704": ("superior longitudinal fasciculus II", [7703, 7710, 8134]),
},
"SuperiorLongitudinalFasciculusIII": {
"110705": ("superior longitudinal fasciculus III", [7703, 7710, 8134]),
},
"SuperolateralToInferomedial": {
"111434": ("Superolateral to inferomedial", [6065]),
},
"SuperomedialToInferolateral": {
"111435": ("Superomedial to inferolateral", [6065]),
},
"SupplementaryData": {
"111414": ("Supplementary Data", []),
},
"SupplementaryDataForIntervention": {
"111463": ("Supplementary Data for Intervention", []),
},
"SupportingInformation": {
"112359": ("Supporting Information", []),
},
"SurfaceBetweenInnerAndOuterSegmentsOfThePhotoreceptors": {
"128295": (
"Surface between Inner and Outer Segments of the photoreceptors",
[4273, 7151, 7192, 9514],
),
},
"SurfaceContourMapping": {
"111792": ("Surface contour mapping", [4245]),
},
"SurfaceOfTheCenterOfTheRPE": {
"128298": ("Surface of the center of the RPE", [4273, 7151, 7192, 9514]),
},
"SurfaceOfTheChoroidScleraInterface": {
"128301": ("Surface of the choroid-sclera interface", [4273, 7151, 7192, 9514]),
},
"SurfaceOfTheInterdigitatingZoneBetweenRetinaAndRPE": {
"128296": (
"Surface of the interdigitating zone between retina and RPE",
[4273, 7151, 7192, 9514],
),
},
"SurfaceRendering": {
"113075": ("Surface rendering", [7203]),
},
"Surgeon": {
"121091": ("Surgeon", []),
},
"SurgeonFactor": {
"111773": ("Surgeon Factor", [4237]),
},
"SurgicalConsult": {
"111410": ("Surgical consult", [6028, 6050, 6083]),
},
"Survey": {
"111128": ("Survey", [6051, 6058, 6061]),
},
"SuspendProcedureAction": {
"121132": ("Suspend Procedure Action", [3421]),
},
"SuspendProcedureActionItem": {
"121132": ("Suspend Procedure Action Item", []),
},
"Suvbsa": {
"126403": ("SUVbsa", [218, 7180, 7469]),
},
"Suvbw": {
"126401": ("SUVbw", [218, 7180, 7469]),
},
"Suvibw": {
"126404": ("SUVibw", [218, 7180, 7469]),
},
"Suvlbm": {
"126402": ("SUVlbm", [218, 7180, 7469]),
},
"SuvlbmJames128": {
"126406": ("SUVlbm(James128)", [218, 7180, 7469]),
},
"SuvlbmJanma": {
"126405": ("SUVlbm(Janma)", [218, 7180, 7469]),
},
"SynchronizedRoboticTreatment": {
"130139": ("Synchronized Robotic Treatment", [9523, 9524]),
},
"SyringeAttached": {
"130163": ("Syringe attached", [71]),
},
"SyringeDetached": {
"130164": ("Syringe detached", [71]),
},
"SystemCalculated": {
"113940": ("System Calculated", [10020]),
},
"SystemicFlow": {
"122162": ("Systemic Flow", [3618]),
},
"SystolicBloodVelocityMean": {
"122203": (
"Systolic blood velocity, mean",
[3612, 12261, 12263, 12264, 12266, 12267, 12268, 12276],
),
},
"SystolicBloodVelocityPeak": {
"122204": (
"Systolic blood velocity, peak",
[3612, 12261, 12263, 12264, 12266, 12267, 12268, 12276],
),
},
"SystolicPeakPressure": {
"109033": ("Systolic peak pressure", []),
},
"SystolicPressureAverage": {
"109032": ("Systolic pressure, average", []),
},
"T1": {
"113063": ("T1", [218, 7180, 7469]),
},
"T1ByFixedValue": {
"126352": ("T1 by Fixed Value", [4100, 4106]),
},
"T1ByInversionRecovery": {
"126351": ("T1 by Inversion Recovery", [4100, 4106]),
},
"T1ByMultipleFlipAngles": {
"126350": ("T1 by Multiple Flip Angles", [4100, 4106]),
},
"T1UsedForCalculation": {
"126353": ("T1 Used For Calculation", []),
},
"T1WeightedDynamicContrastEnhancedMRSignalIntensity": {
"110816": (
"T1 Weighted Dynamic Contrast Enhanced MR Signal Intensity",
[218, 7180, 7469],
),
},
"T1WeightedMRSignalIntensity": {
"110804": ("T1 Weighted MR Signal Intensity", [218, 6311, 7180, 7469]),
},
"T1Worst": {
"122367": ("T-1 Worst", [3493]),
},
"T2": {
"113065": ("T2", [218, 7180, 7469]),
},
"T2Secondary": {
"122368": ("T-2 Secondary", [3493]),
},
"T2Star": {
"113064": ("T2*", [218, 7180, 7469]),
},
"T2StarWeightedDynamicContrastEnhancedMRSignalIntensity": {
"110818": (
"T2* Weighted Dynamic Contrast Enhanced MR Signal Intensity",
[218, 7180, 7469],
),
},
"T2StarWeightedMRSignalIntensity": {
"110806": ("T2* Weighted MR Signal Intensity", [218, 7180, 7469]),
},
"T2WeightedDynamicContrastEnhancedMRSignalIntensity": {
"110817": (
"T2 Weighted Dynamic Contrast Enhanced MR Signal Intensity",
[218, 7180, 7469],
),
},
"T2WeightedMRSignalIntensity": {
"110805": ("T2 Weighted MR Signal Intensity", [218, 6311, 7180, 7469]),
},
"T3Secondary": {
"122369": ("T-3 Secondary", [3493]),
},
"T4Secondary": {
"122370": ("T-4 Secondary", [3493]),
},
"T807F18": {
"126502": ("T807 F^18^", [4021]),
},
"TBISetupMethod": {
"130632": ("TBI Setup Method", [9571]),
},
"TEND": {
"113216": ("TEND", [7262]),
},
"TG18ADPattern": {
"109845": ("TG18-AD Pattern", [8301]),
},
"TG18AFCPattern": {
"109861": ("TG18-AFC Pattern", [8301]),
},
"TG18BRPattern": {
"109802": ("TG18-BR Pattern", [8301]),
},
"TG18CHImage": {
"109878": ("TG18-CH Image", [8301]),
},
"TG18CTPattern": {
"109804": ("TG18-CT Pattern", [8301]),
},
"TG18CXPattern": {
"109854": ("TG18-CX Pattern", [8301]),
},
"TG18GA03Pattern": {
"109870": ("TG18-GA03 Pattern", [8301]),
},
"TG18GA05Pattern": {
"109871": ("TG18-GA05 Pattern", [8301]),
},
"TG18GA08Pattern": {
"109872": ("TG18-GA08 Pattern", [8301]),
},
"TG18GA10Pattern": {
"109873": ("TG18-GA10 Pattern", [8301]),
},
"TG18GA15Pattern": {
"109874": ("TG18-GA15 Pattern", [8301]),
},
"TG18GA20Pattern": {
"109875": ("TG18-GA20 Pattern", [8301]),
},
"TG18GA25Pattern": {
"109876": ("TG18-GA25 Pattern", [8301]),
},
"TG18GA30Pattern": {
"109877": ("TG18-GA30 Pattern", [8301]),
},
"TG18GQBPattern": {
"109869": ("TG18-GQB Pattern", [8301]),
},
"TG18GQNPattern": {
"109868": ("TG18-GQN Pattern", [8301]),
},
"TG18GQPattern": {
"109867": ("TG18-GQ Pattern", [8301]),
},
"TG18GVNPattern": {
"109866": ("TG18-GVN Pattern", [8301]),
},
"TG18GVPattern": {
"109865": ("TG18-GV Pattern", [8301]),
},
"TG18KNImage": {
"109879": ("TG18-KN Image", [8301]),
},
"TG18LN1201Pattern": {
"109823": ("TG18-LN12-01 Pattern", [8301]),
},
"TG18LN1202Pattern": {
"109824": ("TG18-LN12-02 Pattern", [8301]),
},
"TG18LN1203Pattern": {
"109825": ("TG18-LN12-03 Pattern", [8301]),
},
"TG18LN1204Pattern": {
"109826": ("TG18-LN12-04 Pattern", [8301]),
},
"TG18LN1205Pattern": {
"109827": ("TG18-LN12-05 Pattern", [8301]),
},
"TG18LN1206Pattern": {
"109828": ("TG18-LN12-06 Pattern", [8301]),
},
"TG18LN1207Pattern": {
"109829": ("TG18-LN12-07 Pattern", [8301]),
},
"TG18LN1208Pattern": {
"109830": ("TG18-LN12-08 Pattern", [8301]),
},
"TG18LN1209Pattern": {
"109831": ("TG18-LN12-09 Pattern", [8301]),
},
"TG18LN1210Pattern": {
"109832": ("TG18-LN12-10 Pattern", [8301]),
},
"TG18LN1211Pattern": {
"109833": ("TG18-LN12-11 Pattern", [8301]),
},
"TG18LN1212Pattern": {
"109834": ("TG18-LN12-12 Pattern", [8301]),
},
"TG18LN1213Pattern": {
"109835": ("TG18-LN12-13 Pattern", [8301]),
},
"TG18LN1214Pattern": {
"109836": ("TG18-LN12-14 Pattern", [8301]),
},
"TG18LN1215Pattern": {
"109837": ("TG18-LN12-15 Pattern", [8301]),
},
"TG18LN1216Pattern": {
"109838": ("TG18-LN12-16 Pattern", [8301]),
},
"TG18LN1217Pattern": {
"109839": ("TG18-LN12-17 Pattern", [8301]),
},
"TG18LN1218Pattern": {
"109840": ("TG18-LN12-18 Pattern", [8301]),
},
"TG18LN801Pattern": {
"109805": ("TG18-LN8-01 Pattern", [8301]),
},
"TG18LN802Pattern": {
"109806": ("TG18-LN8-02 Pattern", [8301]),
},
"TG18LN803Pattern": {
"109807": ("TG18-LN8-03 Pattern", [8301]),
},
"TG18LN804Pattern": {
"109808": ("TG18-LN8-04 Pattern", [8301]),
},
"TG18LN805Pattern": {
"109809": ("TG18-LN8-05 Pattern", [8301]),
},
"TG18LN806Pattern": {
"109810": ("TG18-LN8-06 Pattern", [8301]),
},
"TG18LN807Pattern": {
"109811": ("TG18-LN8-07 Pattern", [8301]),
},
"TG18LN808Pattern": {
"109812": ("TG18-LN8-08 Pattern", [8301]),
},
"TG18LN809Pattern": {
"109813": ("TG18-LN8-09 Pattern", [8301]),
},
"TG18LN810Pattern": {
"109814": ("TG18-LN8-10 Pattern", [8301]),
},
"TG18LN811Pattern": {
"109815": ("TG18-LN8-11 Pattern", [8301]),
},
"TG18LN812Pattern": {
"109816": ("TG18-LN8-12 Pattern", [8301]),
},
"TG18LN813Pattern": {
"109817": ("TG18-LN8-13 Pattern", [8301]),
},
"TG18LN814Pattern": {
"109818": ("TG18-LN8-14 Pattern", [8301]),
},
"TG18LN815Pattern": {
"109819": ("TG18-LN8-15 Pattern", [8301]),
},
"TG18LN816Pattern": {
"109820": ("TG18-LN8-16 Pattern", [8301]),
},
"TG18LN817Pattern": {
"109821": ("TG18-LN8-17 Pattern", [8301]),
},
"TG18LN818Pattern": {
"109822": ("TG18-LN8-18 Pattern", [8301]),
},
"TG18LPH10Pattern": {
"109855": ("TG18-LPH10 Pattern", [8301]),
},
"TG18LPH50Pattern": {
"109856": ("TG18-LPH50 Pattern", [8301]),
},
"TG18LPH89Pattern": {
"109857": ("TG18-LPH89 Pattern", [8301]),
},
"TG18LPV10Pattern": {
"109858": ("TG18-LPV10 Pattern", [8301]),
},
"TG18LPV50Pattern": {
"109859": ("TG18-LPV50 Pattern", [8301]),
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
from nova import context
from nova.db.main import api as db
from nova import exception
from nova import objects
from nova import test
class InstanceListTestCase(test.TestCase):
NUMBER_OF_CELLS = 3
def setUp(self):
super(InstanceListTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.num_instances = 3
self.instances = []
start = datetime.datetime(1985, 10, 25, 1, 21, 0)
dt = start
spread = datetime.timedelta(minutes=10)
self.cells = objects.CellMappingList.get_all(self.context)
# Create three instances in each of the real cells. Leave the
# first cell empty to make sure we don't break with an empty
# one.
for cell in self.cells[1:]:
for i in range(0, self.num_instances):
with context.target_cell(self.context, cell) as cctx:
inst = objects.Instance(
context=cctx,
project_id=self.context.project_id,
user_id=self.context.user_id,
created_at=start,
launched_at=dt,
instance_type_id=i,
hostname='%s-inst%i' % (cell.name, i))
inst.create()
if i % 2 == 0:
# Make some faults for this instance
for n in range(0, i + 1):
msg = 'fault%i-%s' % (n, inst.hostname)
f = objects.InstanceFault(context=cctx,
instance_uuid=inst.uuid,
code=i,
message=msg,
details='fake',
host='fakehost')
f.create()
self.instances.append(inst)
im = objects.InstanceMapping(context=self.context,
project_id=inst.project_id,
user_id=inst.user_id,
instance_uuid=inst.uuid,
cell_mapping=cell)
im.create()
dt += spread
def test_get_sorted(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['asc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_descending(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['desc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(list(reversed(sorted(uuids))), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_with_filter(self):
filters = {'instance_type_id': 1}
limit = None
marker = None
columns = []
sort_keys = ['uuid']
sort_dirs = ['asc']
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = [inst['uuid'] for inst in insts]
expected = [inst['uuid'] for inst in self.instances
if inst['instance_type_id'] == 1]
self.assertEqual(list(sorted(expected)), uuids)
def test_get_sorted_by_defaults(self):
filters = {}
limit = None
marker = None
columns = []
sort_keys = None
sort_dirs = None
obj, insts = instance_list.get_instances_sorted(self.context, filters,
limit, marker, columns,
sort_keys, sort_dirs)
uuids = set([inst['uuid'] for inst in insts])
expected = set([inst['uuid'] for inst in self.instances])
self.assertEqual(expected, uuids)
def test_get_sorted_with_limit(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5, None,
[], ['uuid'], ['asc'])
uuids = [inst['uuid'] for inst in insts]
had_uuids = [inst.uuid for inst in self.instances]
self.assertEqual(sorted(had_uuids)[:5], uuids)
self.assertEqual(5, len(uuids))
def test_get_sorted_with_large_limit(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5000, None,
[], ['uuid'], ['asc'])
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def test_get_sorted_with_large_limit_batched(self):
obj, insts = instance_list.get_instances_sorted(self.context, {},
5000, None,
[], ['uuid'], ['asc'],
batch_size=2)
uuids = [inst['uuid'] for inst in insts]
self.assertEqual(sorted(uuids), uuids)
self.assertEqual(len(self.instances), len(uuids))
def _test_get_sorted_with_limit_marker(self, sort_by, pages=2, pagesize=2,
sort_dir='asc'):
"""Get multiple pages by a sort key and validate the results.
This requests $pages of $pagesize, followed by a final page with
no limit, and a final-final page which should be empty. It validates
that we got a consistent set of results no matter where the page
boundary is, that we got all the results after the unlimited query,
and that the final page comes back empty when we use the last
instance as a marker.
"""
insts = []
page = 0
while True:
if page >= pages:
# We've requested the specified number of limited (by pagesize)
# pages, so request a penultimate page with no limit which
# should always finish out the result.
limit = None
else:
# Request a limited-size page for the first $pages pages.
limit = pagesize
if insts:
# If we're not on the first page, use the last instance we
# received as the marker
marker = insts[-1]['uuid']
else:
# No marker for the first page
marker = None
batch = list(
instance_list.get_instances_sorted(self.context, {},
limit, marker,
[], [sort_by],
[sort_dir])[1])
if not batch:
# This should only happen when we've pulled the last empty
# page because we used the marker of the last instance. If
# we end up with a non-deterministic ordering, we'd loop
# forever.
break
insts.extend(batch)
page += 1
if page > len(self.instances) * 2:
# Do this sanity check in case we introduce (or find) another
# repeating page bug like #1721791. Without this we loop
# until timeout, which is less obvious.
raise Exception('Infinite paging loop')
# We should have requested exactly (or one more unlimited) pages
self.assertIn(page, (pages, pages + 1))
# Make sure the full set matches what we know to be true
found = [x[sort_by] for x in insts]
had = [x[sort_by] for x in self.instances]
if sort_by in ('launched_at', 'created_at'):
# We're comparing objects and database entries, so we need to
# squash the tzinfo of the object ones so we can compare
had = [x.replace(tzinfo=None) for x in had]
self.assertEqual(len(had), len(found))
if sort_dir == 'asc':
self.assertEqual(sorted(had), found)
else:
self.assertEqual(list(reversed(sorted(had))), found)
def test_get_sorted_with_limit_marker_stable(self):
"""Test sorted by hostname.
This will be a stable sort that won't change on each run.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname')
def test_get_sorted_with_limit_marker_stable_reverse(self):
"""Test sorted by hostname.
This will be a stable sort that won't change on each run.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
sort_dir='desc')
def test_get_sorted_with_limit_marker_stable_different_pages(self):
"""Test sorted by hostname with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
pages=3, pagesize=1)
def test_get_sorted_with_limit_marker_stable_different_pages_reverse(self):
"""Test sorted by hostname with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='hostname',
pages=3, pagesize=1,
sort_dir='desc')
def test_get_sorted_with_limit_marker_random(self):
"""Test sorted by uuid.
This will not be stable and the actual ordering will depend on
uuid generation and thus be different on each run. Do this in
addition to the stable sort above to keep us honest.
"""
self._test_get_sorted_with_limit_marker(sort_by='uuid')
def test_get_sorted_with_limit_marker_random_different_pages(self):
"""Test sorted by uuid with different page sizes.
Just do the above with page seams in different places.
"""
self._test_get_sorted_with_limit_marker(sort_by='uuid',
pages=3, pagesize=2)
def test_get_sorted_with_limit_marker_datetime(self):
"""Test sorted by launched_at.
This tests that we can do all of this, but with datetime
fields.
"""
self._test_get_sorted_with_limit_marker(sort_by='launched_at')
def test_get_sorted_with_limit_marker_datetime_same(self):
"""Test sorted by created_at.
This tests that we can do all of this, but with datetime
fields that are identical.
"""
self._test_get_sorted_with_limit_marker(sort_by='created_at')
def test_get_sorted_with_deleted_marker(self):
marker = self.instances[1]['uuid']
before = list(
instance_list.get_instances_sorted(self.context, {},
None, marker,
[], None, None)[1])
db.instance_destroy(self.context, marker)
after = list(
instance_list.get_instances_sorted(self.context, {},
None, marker,
[], None, None)[1])
self.assertEqual(before, after)
def test_get_sorted_with_invalid_marker(self):
self.assertRaises(exception.MarkerNotFound,
list, instance_list.get_instances_sorted(
self.context, {}, None, 'not-a-marker',
[], None, None)[1])
def test_get_sorted_with_purged_instance(self):
"""Test that we handle a mapped but purged instance."""
im = objects.InstanceMapping(self.context,
instance_uuid=uuids.missing,
project_id=self.context.project_id,
user_id=self.context.user_id,
cell_mapping=self.cells[0])
im.create()
self.assertRaises(exception.MarkerNotFound,
list, instance_list.get_instances_sorted(
self.context, {}, None, uuids.missing,
[], None, None)[1])
def _test_get_paginated_with_filter(self, filters):
found_uuids = []
marker = None
while True:
# Query for those instances, sorted by a different key in
# pages of one until we've consumed them all
batch = list(
instance_list.get_instances_sorted(self.context,
filters,
1, marker, [],
['hostname'],
['asc'])[1])
if not batch:
break
found_uuids.extend([x['uuid'] for x in batch])
marker = found_uuids[-1]
return found_uuids
def test_get_paginated_with_uuid_filter(self):
"""Test getting pages with uuid filters.
This runs through the results of a uuid-filtered query in pages of
length one to ensure that we land on markers that are filtered out
of the query and are not accidentally returned.
"""
# Pick a set of the instances by uuid, when sorted by uuid
all_uuids = [x['uuid'] for x in self.instances]
filters = {'uuid': sorted(all_uuids)[:7]}
found_uuids = self._test_get_paginated_with_filter(filters)
# Make sure we found all (and only) the instances we asked for
self.assertEqual(set(found_uuids), set(filters['uuid']))
self.assertEqual(7, len(found_uuids))
def test_get_paginated_with_other_filter(self):
"""Test getting pages with another filter.
This runs through the results of a
different conn and cur to avoid error +++096 (old code)
fn_con, acur = litedb.connect(self.xcfg.DB_PATH)
for filepic in sfiles:
# filepic[1] = path for the file from table files
# filepic[2] = set_id from files table
setname = faw.set_name_from_file(filepic[1],
self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME)
aset = None
litedb.execute(fn_con,
'SELECT#098:delete_record_localdb',
lockdb, self.args.processes,
acur,
'SELECT set_id, name FROM sets WHERE name = ?',
qmarkargs=(setname,),
dbcaughtcode='098')
aset = acur.fetchone()
if aset is not None:
set_id = aset[0]
NP.niceprint('Add file to set:[{!s}] '
'set:[{!s}] set_id=[{!s}]'
.format(NP.strunicodeout(filepic[1]),
NP.strunicodeout(setname),
set_id))
self.add_file_to_set(lockdb, set_id, filepic, acur)
else:
NP.niceprint('Not able to assign pic to set',
logalso=logging.ERROR)
logging.debug('===Multiprocessing=== in.mutex.acquire(w)')
mutex.acquire()
running.value += 1
xcount = running.value
mutex.release()
logging.info('===Multiprocessing=== out.mutex.release(w)')
# Show number of files processed so far
NP.niceprocessedfiles(xcount, c_total, False,
msg='Added to Set')
# Closing DB connection
litedb.close(fn_con)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# create_sets
#
def create_sets(self):
""" create_sets
Creates Sets (Album) in Flickr
"""
# [FIND SETS] Find sets to be created
# [PRIMARY PIC] For each set found, determine the primary picture
# [CREATE SET] Create Sets with primary picture:
# CODING: what if it is not found?
# [WORK THRU PICS] Split work and add files to set in multi-processing
# ---------------------------------------------------------------------
# Local Variables
#
# slockdb = multiprocessing Lock for access to Database
# smutex = multiprocessing mutex for access to value srunning
# srunning = multiprocessing Value to count processed photos
slockdb = None
smutex = None
srunning = None
NP.niceprint('*****Creating Sets*****')
if self.args.dry_run:
return True
con, cur = litedb.connect(self.xcfg.DB_PATH)
con.create_function("getSet", 3, faw.set_name_from_file)
# Enable traceback return from con.create_function.
litedb.enable_callback_tracebacks(True)
with con:
# List of Sets to be created
litedb.execute(con, 'SELECT#145', slockdb, self.args.processes,
cur,
'SELECT DISTINCT getSet(path, ?, ?) '
'FROM files WHERE getSet(path, ?, ?) '
'NOT IN (SELECT name FROM sets)',
qmarkargs=(self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME,
self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME,),
dbcaughtcode='145')
sets_to_create = cur.fetchall()
for aset in sets_to_create:
# aset[0] = setname
# Find Primary photo
setname = NP.strunicodeout(aset[0])
litedb.execute(con, 'SELECT#156', slockdb, self.args.processes,
cur,
'SELECT MIN(files_id), path '
'FROM files '
'WHERE set_id is NULL '
'AND getSet(path, ?, ?) = ?',
qmarkargs=(self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME,
setname,),
dbcaughtcode='156')
primary_pic = cur.fetchone()
# primary_pic[0] = files_id from files table
set_id = self.create_set(slockdb,
setname, primary_pic[0],
cur, con)
NP.niceprint('Created the set:[{!s}] '
'set_id=[{!s}] '
'primaryId=[{!s}]'
.format(NP.strunicodeout(setname),
set_id,
primary_pic[0]))
litedb.execute(con, 'SELECT#157', slockdb, self.args.processes,
cur,
'SELECT files_id, path, set_id '
'FROM files '
'WHERE set_id is NULL',
dbcaughtcode='157')
files = cur.fetchall()
# running in multi processing mode
if self.args.processes and self.args.processes > 0:
logging.debug('Running [%s] processes pool.',
self.args.processes)
logging.debug('__name__:[%s] to prevent recursive calling)!',
__name__)
# To prevent recursive calling, check if __name__ == '__main__'
# if __name__ == '__main__':
mp.mprocessing(self.args.processes,
slockdb,
srunning,
smutex,
files,
self.fn_add_filestosets,
cur)
# running in single processing mode
else:
for filepic in files:
# filepic[1] = path for the file from table files
# filepic[2] = set_id from files table
setname = faw.set_name_from_file(filepic[1],
self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME)
litedb.execute(con, 'SELECT#158',
slockdb, self.args.processes,
cur,
'SELECT set_id, name '
'FROM sets WHERE name = ?',
qmarkargs=(setname,),
dbcaughtcode='158')
aset = cur.fetchone()
if aset is not None:
set_id = aset[0]
NP.niceprint('Add file to set:[{!s}] '
'set:[{!s}] set_id=[{!s}]'
.format(NP.strunicodeout(filepic[1]),
NP.strunicodeout(setname),
set_id))
self.add_file_to_set(slockdb, set_id, filepic, cur)
else:
NP.niceprint('Not able to assign pic to set.',
logalso=logging.ERROR)
# Closing DB connection
litedb.close(con)
NP.niceprint('*****Completed creating sets*****')
# -------------------------------------------------------------------------
# add_file_to_set
#
def add_file_to_set(self, lock, set_id, file, cur):
""" add_file_to_set
Adds a file to set...
lock = for multiprocessing access control to DB
setID = set
file = file is a list with file[0]=id, file[1]=path
cur = cursor for updating local DB
"""
if self.args.dry_run:
return True
con, bcur = litedb.connect(self.xcfg.DB_PATH)
get_success, _, get_errcode = faw.flickrapi_fn(
self.nuflickr.photosets.addPhoto, (),
dict(photoset_id=str(set_id),
photo_id=str(file[0])),
2, 0, False, caughtcode='159')
if get_success and get_errcode == 0:
NP.niceprint(' Added file/set:[{!s}] set_id:[{!s}]'
.format(NP.strunicodeout(file[1]),
NP.strunicodeout(set_id)))
litedb.execute(con, 'SELECT#159', lock, self.args.processes,
bcur,
'UPDATE files SET set_id = ? '
'WHERE files_id = ?',
qmarkargs=(set_id, file[0]),
dbcaughtcode='159')
elif not get_success and get_errcode == 1:
# Error: 1: Photoset not found
NP.niceprint('Photoset not found, creating new set...')
setname = faw.set_name_from_file(file[1],
self.xcfg.FILES_DIR,
self.xcfg.FULL_SET_NAME)
# CODING: cur vs bcur! Check!
self.create_set(lock, setname, file[0], cur, con)
elif not get_success and get_errcode == 3:
# Error: 3: Photo already in set
NP.niceprint('Photo already in set... updating DB '
'set_id=[{!s}] photo_id=[{!s}]'
.format(set_id, file[0]))
litedb.execute(con, 'SELECT#160', lock, self.args.processes,
bcur,
'UPDATE files SET set_id = ? '
'WHERE files_id = ?',
qmarkargs=(set_id, file[0]),
dbcaughtcode='160')
else:
NP.niceerror(caught=True,
caughtprefix='xxx',
caughtcode='120',
caughtmsg='Failed add photo to set (add_file_to_set)',
useniceprint=True)
# Closing DB connection
if con is not None:
logging.warning('Closing DB connection on add_file_to_set.')
litedb.close(con)
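# (Added summary, not part of the original source.) The branches above rely on
# the Flickr photosets.addPhoto error codes: 1 means the photoset no longer
# exists (so a new set is created), 3 means the photo is already in the set
# (so only the local DB is updated); any other failure is reported through
# NP.niceerror.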
# -------------------------------------------------------------------------
# create_set
#
# Creates an Album in Flickr.
#
def create_set(self, lock, setname, primary_photo_id, cur, con):
""" create_set
Creates an Album in Flickr.
Calls log_set_creation to create Album on local database.
"""
NP.niceprint(' Creating set:[{!s}]'
.format(NP.strunicodeout(setname)),
logalso=logging.INFO)
if self.args.dry_run:
return True
get_success, get_result, get_errcode = faw.flickrapi_fn(
self.nuflickr.photosets.create, (),
dict(title=setname,
primary_photo_id=str(primary_photo_id)),
3, 10, True, caughtcode='124')
success = False
if get_success and get_errcode == 0:
logging.warning('get_result["photoset"]["id"]:[%s]',
get_result.find('photoset').attrib['id'])
self.log_set_creation(lock,
get_result.find('photoset').attrib['id'],
setname,
primary_photo_id,
cur,
con)
return get_result.find('photoset').attrib['id']
elif not get_success and get_errcode == 2:
# Add to db the file NOT uploaded
# A set on local DB (with primary photo) failed to be created on
# FLickr because Primary Photo is not available.
# Sets (possibly from previous runs) exist on local DB but the pics
# are not loaded into Flickr.
# FlickrError(u'Error: 2: Invalid primary photo id (nnnnnn)
NP.niceprint('Primary photo [{!s}] for Set [{!s}] '
'does not exist on Flickr. '
'Probably deleted from Flickr but still '
'on local db and local file.'
.format(primary_photo_id,
NP.strunicodeout(setname)),
logalso=logging.ERROR)
success = False
else:
# CODING: Revise code/message output
NP.niceerror(exceptuse=False,
exceptcode=get_result['code']
if 'code' in get_result
else get_result,
exceptmsg=get_result['message']
if 'message' in get_result
else get_result,
useniceprint=True)
success = False
return success
# -------------------------------------------------------------------------
# setup_db
#
# Creates the control database
#
def setup_db(self):
""" setup_db
Creates the control database
"""
NP.niceprint('Setting up database:[{!s}]'.format(self.xcfg.DB_PATH))
con = None
try:
con, cur = litedb.connect(self.xcfg.DB_PATH)
# CODING: Use _success = False to check each step.
litedb.execute(con, 'CREATE#001:setup_db',
None, self.args.processes, cur,
'CREATE TABLE IF NOT EXISTS files '
'(files_id INT, path TEXT, set_id INT, '
'md5 TEXT, tagged INT)',
dbcaughtcode='001')
litedb.execute(con, 'CREATE#002:setup_db',
None, self.args.processes, cur,
'CREATE TABLE IF NOT EXISTS sets '
'(set_id INT, name TEXT, primary_photo_id INTEGER)',
dbcaughtcode='002')
litedb.execute(con, 'CREATE#003:setup_db',
None, self.args.processes, cur,
'CREATE UNIQUE INDEX IF NOT EXISTS fileindex '
'ON files (path)',
dbcaughtcode='003')
litedb.execute(con, 'CREATE#004:setup_db',
None, self.args.processes, cur,
'CREATE INDEX IF NOT EXISTS setsindex '
'ON sets (name)',
dbcaughtcode='004')
# Check database version.
# [0] = newly created
# [1] = with last_modified column
# [2] = badfiles table added
# [3] = Adding album tags to pics on upload.
# Used in subsequent searches.
litedb.execute(con, 'SELECT#005:setup_db',
None, self.args.processes, cur,
'PRAGMA user_version',
dbcaughtcode='005')
row = cur.fetchone()
if row[0] == 0:
# Database version 1 <=========================DB VERSION: 1===
NP.niceprint('Adding last_modified column to database',
verbosity=1)
cur = con.cursor()
litedb.execute(con, 'PRAGMA#006:setup_db',
None, self.args.processes, cur,
'PRAGMA user_version="1"',
dbcaughtcode='006')
litedb.execute(con, 'ALTER#007:setup_db',
None, self.args.processes, cur,
'ALTER TABLE files '
'ADD COLUMN last_modified REAL',
dbcaughtcode='007')
# Obtain new version to continue updating database
litedb.execute(con, 'PRAGMA#008:setup_db',
None, self.args.processes, cur,
'PRAGMA user_version',
dbcaughtcode='008')
row = cur.fetchone()
if row[0] == 1:
# Database version 2 <=========================DB VERSION: 2===
# Cater for badfiles
NP.niceprint('Adding table badfiles to database',
verbosity=1)
litedb.execute(con, 'PRAGMA#009:setup_db',
None, self.args.processes, cur,
'PRAGMA user_version="2"',
dbcaughtcode='009')
litedb.execute(con, 'CREATE#010:setup_db',
None, self.args.processes, cur,
'CREATE TABLE IF NOT EXISTS badfiles '
'(files_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'path TEXT, set_id INT, md5 TEXT, tagged INT, '
'last_modified REAL)',
dbcaughtcode='010')
litedb.execute(con, 'CREATE#011:setup_db',
None, self.args.processes, cur,
'CREATE UNIQUE INDEX IF NOT EXISTS badfileindex'
' ON badfiles (path)',
dbcaughtcode='011')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS badfileindex '
'ON badfiles (path)')
litedb.execute(con, 'PRAGMA#012:setup_db',
None, self.args.processes, cur,
'PRAGMA user_version',
dbcaughtcode='012')
row = cur.fetchone()
if row[0] == 2:
NP.niceprint('Database version: [{!s}]'.format(row[0]))
# Database version 3 <=========================DB VERSION: 3===
NP.niceprint('Adding album tags to pics already uploaded... ')
if self.add_albums_tag():
NP.niceprint('Successfully added album tags to pics '
'already
resource_path = '/amendments/{amendment-ID}'.replace('{format}', 'json')
path_params = {}
if 'amendment_id' in params:
path_params['amendment-ID'] = params['amendment_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AmendmentPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_amendment_by_state(self, state, **kwargs):
"""
Returns a collection of amendments, specified by the state parameter. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by state\",\"response\":\"getAmendmentsByState.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_amendment_by_state(state, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str state: The current state of the amendment, either pending, succeeded, failed or discarded (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first amendment to return.
:param int records: The maximum number of amendments to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:return: AmendmentPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_amendment_by_state_with_http_info(state, **kwargs)
else:
(data) = self.get_amendment_by_state_with_http_info(state, **kwargs)
return data
def get_amendment_by_state_with_http_info(self, state, **kwargs):
"""
Returns a collection of amendments, specified by the state parameter. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by state\",\"response\":\"getAmendmentsByState.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_amendment_by_state_with_http_info(state, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str state: The current state of the amendment, either pending, succeeded, failed or discarded (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first amendment to return.
:param int records: The maximum number of amendments to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:return: AmendmentPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['state', 'organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_amendment_by_state" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'state' is set
if ('state' not in params) or (params['state'] is None):
raise ValueError("Missing the required parameter `state` when calling `get_amendment_by_state`")
resource_path = '/amendments/state/{state}'.replace('{format}', 'json')
path_params = {}
if 'state' in params:
path_params['state'] = params['state']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AmendmentPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
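# Usage sketch (illustrative; `api` denotes an instance of this API class and
# is an assumption, not part of the generated client):
#
#   # synchronous call: returns an AmendmentPagedMetadata
#   page = api.get_amendment_by_state('pending', records=20, order='ASC')
#
#   # asynchronous call: returns the request thread, the parsed response is
#   # delivered to the callback
#   def on_done(response):
#       print(response)
#   thread = api.get_amendment_by_state('pending', callback=on_done)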
def get_amendment_by_subscription_id(self, subscription_id, **kwargs):
"""
Returns a collection of amendments, specified by the subscription-ID parameter. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by subscription\",\"response\":\"getAmendmentsBySubscription.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_amendment_by_subscription_id(subscription_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str subscription_id: ID of the subscription (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first amendment to return.
:param int records: The maximum number of amendments to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:return: AmendmentPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_amendment_by_subscription_id_with_http_info(subscription_id, **kwargs)
else:
(data) = self.get_amendment_by_subscription_id_with_http_info(subscription_id, **kwargs)
return data
def get_amendment_by_subscription_id_with_http_info(self, subscription_id, **kwargs):
"""
Returns a collection of amendments, specified by the subscription-ID parameter. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by subscription\",\"response\":\"getAmendmentsBySubscription.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_amendment_by_subscription_id_with_http_info(subscription_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str subscription_id: ID of the subscription (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first amendment to return.
:param int records: The maximum number of amendments to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:return: AmendmentPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['subscription_id', 'organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_amendment_by_subscription_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'subscription_id' is set
if ('subscription_id' not in params) or (params['subscription_id'] is None):
raise ValueError("Missing the required parameter `subscription_id` when calling `get_amendment_by_subscription_id`")
resource_path = '/amendments/subscription/{subscription-ID}'.replace('{format}', 'json')
path_params = {}
if 'subscription_id' in params:
path_params['subscription-ID'] = params['subscription_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AmendmentPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_amendment_swagger(self, query_string, **kwargs):
"""
{\"nickname\":\"\",\"response\":\"\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_amendment_swagger(query_string, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str query_string: The query string used to search. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The starting index of the search results.
:param int records: The number of search results to return.
:param bool wildcard: Whether to match whole words only or to allow wildcard matching.
:param bool entity: Whether an entity is returned with the search results.
:return: SwaggerTypeList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_amendment_swagger_with_http_info(query_string, **kwargs)
else:
(data) = self.get_amendment_swagger_with_http_info(query_string, **kwargs)
return data
def get_amendment_swagger_with_http_info(self, query_string, **kwargs):
"""
{\"nickname\":\"\",\"response\":\"\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
"""This module contains the backend implementation for tf Agents (see https://github.com/tensorflow/agents)"""
from abc import ABCMeta
from typing import Dict, Optional, Type
import math
import os
# noinspection PyUnresolvedReferences
import easyagents.agents
from easyagents import core
from easyagents.backends import core as bcore
from easyagents.backends import monitor
# noinspection PyPackageRequirements
import tensorflow as tf
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.dqn import dqn_agent
from tf_agents.agents.ppo import ppo_agent
from tf_agents.agents.reinforce import reinforce_agent
from tf_agents.agents.sac import sac_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.drivers.dynamic_episode_driver import DynamicEpisodeDriver
from tf_agents.environments import gym_wrapper, py_environment, tf_py_environment
from tf_agents.networks import actor_distribution_network, normal_projection_network, q_network, value_network
from tf_agents.policies import greedy_policy, tf_policy, random_tf_policy, policy_saver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
import gym
# noinspection PyUnresolvedReferences,PyAbstractClass
class TfAgent(bcore.BackendAgent, metaclass=ABCMeta):
"""Reinforcement learning agents based on googles tf_agent implementations
https://github.com/tensorflow/agents
"""
def __init__(self, model_config: core.ModelConfig):
super().__init__(model_config=model_config,
backend_name=TfAgentAgentFactory.backend_name,
tf_eager_execution=True)
self._trained_policy = None
self._play_env: Optional[gym.Env] = None
def _create_gym_with_wrapper(self, discount):
gym_spec = gym.spec(self.model_config.gym_env_name)
gym_env = gym_spec.make()
# simplify_box_bounds: Whether to replace bounds of Box space that are arrays
# with identical values with one number and rely on broadcasting.
# Important: simplify_box_bounds=True crashes environments whose bounds contain identical values
env = gym_wrapper.GymWrapper(
gym_env,
discount=discount,
simplify_box_bounds=False)
return env
def _create_env(self, discount: float = 1) -> tf_py_environment.TFPyEnvironment:
""" creates a new instance of the gym environment and wraps it in a tfagent TFPyEnvironment
Args:
discount: the reward discount factor
"""
assert 0 < discount <= 1, "discount not admissible"
self.log_api(f'TFPyEnvironment', f'( suite_gym.load( "{self.model_config.original_env_name}", discount={discount}) )')
# suite_gym.load crashes our environment
# py_env = suite_gym.load(self.model_config.gym_env_name, discount=discount)
py_env = self._create_gym_with_wrapper(discount)
result = tf_py_environment.TFPyEnvironment(py_env)
return result
def _get_gym_env(self, tf_py_env: tf_py_environment.TFPyEnvironment) -> monitor._MonitorEnv:
""" extracts the underlying _MonitorEnv from tf_py_env created by _create_tfagent_env"""
assert isinstance(tf_py_env, tf_py_environment.TFPyEnvironment), \
"passed tf_py_env is not an instance of TFPyEnvironment"
assert isinstance(tf_py_env.pyenv, py_environment.PyEnvironment), \
"passed TFPyEnvironment.pyenv does not contain a PyEnvironment"
assert len(tf_py_env.pyenv.envs) == 1, "passed TFPyEnvironment.pyenv does not contain a unique environment"
result = tf_py_env.pyenv.envs[0].gym
assert isinstance(result, monitor._MonitorEnv), "passed TFPyEnvironment does not contain a _MonitorEnv"
return result
def play_implementation(self, play_context: core.PlayContext):
"""Agent specific implementation of playing a single episodes with the current policy.
Args:
play_context: play configuration to be used
"""
assert play_context, "play_context not set."
assert self._trained_policy, "trained_policy not set. call train() first."
if self._play_env is None:
self._play_env = self._create_env()
gym_env = self._get_gym_env(self._play_env)
while True:
self.on_play_episode_begin(env=gym_env)
time_step = self._play_env.reset()
while not time_step.is_last():
action_step = self._trained_policy.action(time_step)
time_step = self._play_env.step(action_step.action)
self.on_play_episode_end()
if play_context.play_done:
break
def load_implementation(self, directory: str):
"""Loads a previously saved actor policy from the directory
Args:
directory: the directory to load the policy from.
"""
assert directory
self.log_api('saved_model.load', f'({directory})')
self._trained_policy = tf.compat.v2.saved_model.load(directory)
def save_implementation(self, directory: str):
"""Saves the trained actor policy in directory.
If no policy was trained yet, no file is written.
Args:
directory: the directory to save the policy weights to.
"""
assert self._trained_policy, "no policy trained yet."
self.log_api('PolicySaver', f'(trained_policy,seed={self.model_config.seed})')
saver = policy_saver.PolicySaver(self._trained_policy, seed=self.model_config.seed)
self.log_api('policy_saver.save', f'({directory})')
saver.save(directory)
# noinspection PyUnresolvedReferences
class TfDqnAgent(TfAgent):
""" creates a new agent based on the DQN algorithm using the tfagents implementation.
Args:
model_config: the model configuration including the name of the target gym environment
as well as the neural network architecture.
"""
def __init__(self, model_config: core.ModelConfig):
super().__init__(model_config=model_config)
def collect_step(self, env: tf_py_environment.TFPyEnvironment, policy: tf_policy.Base,
replay_buffer: TFUniformReplayBuffer):
time_step = env.current_time_step()
action_step = policy.action(time_step)
next_time_step = env.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
replay_buffer.add_batch(traj)
# noinspection DuplicatedCode
def train_implementation(self, train_context: core.TrainContext):
"""Tf-Agents Ppo Implementation of the train loop.
The implementation follows
https://colab.research.google.com/github/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb
"""
assert isinstance(train_context, core.StepsTrainContext)
dc: core.StepsTrainContext = train_context
train_env = self._create_env(discount=dc.reward_discount_gamma)
observation_spec = train_env.observation_spec()
action_spec = train_env.action_spec()
timestep_spec = train_env.time_step_spec()
# SetUp Optimizer, Networks and DqnAgent
self.log_api('AdamOptimizer', f'(learning_rate={dc.learning_rate})')
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=dc.learning_rate)
self.log_api('QNetwork', f'(observation_spec, action_spec, fc_layer_params={self.model_config.fc_layers})')
q_net = q_network.QNetwork(observation_spec, action_spec, fc_layer_params=self.model_config.fc_layers)
self.log_api('DqnAgent', '(timestep_spec,action_spec,q_network=..., optimizer=...,td_errors_loss_fn=common.element_wise_squared_loss)')
tf_agent = dqn_agent.DqnAgent(timestep_spec, action_spec,
q_network=q_net, optimizer=optimizer,
td_errors_loss_fn=common.element_wise_squared_loss)
self.log_api('tf_agent.initialize', f'()')
tf_agent.initialize()
self._trained_policy = tf_agent.policy
# SetUp Data collection & Buffering
self.log_api('TFUniformReplayBuffer', f'(data_spec=..., batch_size={train_env.batch_size}, max_length={dc.max_steps_in_buffer})')
replay_buffer = TFUniformReplayBuffer(data_spec=tf_agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=dc.max_steps_in_buffer)
self.log_api('RandomTFPolicy', '()')
random_policy = random_tf_policy.RandomTFPolicy(timestep_spec, action_spec)
self.log_api('replay_buffer.add_batch', '(trajectory)')
for _ in range(dc.num_steps_buffer_preload):
self.collect_step(env=train_env, policy=random_policy, replay_buffer=replay_buffer)
# Train
self.log_api('tf_agent.train', '= common.function(tf_agent.train)')
tf_agent.train = common.function(tf_agent.train)
self.log_api('replay_buffer.as_dataset', f'(num_parallel_calls=3, ' +
f'sample_batch_size={dc.num_steps_sampled_from_buffer}, num_steps=2).prefetch(3)')
dataset = replay_buffer.as_dataset(num_parallel_calls=3, sample_batch_size=dc.num_steps_sampled_from_buffer,
num_steps=2).prefetch(3)
self.log_api('iter(dataset)', f'{iter(dataset)}')
iter_dataset = iter(dataset)
self.log_api('for each iteration')
self.log_api(' replay_buffer.add_batch', '(trajectory)')
self.log_api(' tf_agent.train', '(experience=trajectory)')
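# Training loop sketch: each iteration collects num_steps_per_iteration environment steps with the
# agent's collect policy into the replay buffer, then draws one batch from the dataset iterator and
# performs a single gradient step via tf_agent.train.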
while True:
self.on_train_iteration_begin()
for _ in range(dc.num_steps_per_iteration):
self.collect_step(env=train_env, policy=tf_agent.collect_policy, replay_buffer=replay_buffer)
trajectories, _ = next(iter_dataset)
tf_loss_info = tf_agent.train(experience=trajectories)
self.on_train_iteration_end(tf_loss_info.loss)
if train_context.training_done:
break
return
# noinspection PyUnresolvedReferences
class TfPpoAgent(TfAgent):
""" creates a new agent based on the PPO algorithm using the tfagents implementation.
PPO is an actor-critic algorithm using two neural networks: an actor network
to predict the next action to be taken, and a critic network to estimate
the value of the game state we are currently in (the expected, discounted
sum of future rewards when following the current actor network).
Args:
model_config: the model configuration including the name of the target gym environment
as well as the neural network architecture.
"""
def __init__(self, model_config: core.ModelConfig):
super().__init__(model_config=model_config)
# noinspection DuplicatedCode
def train_implementation(self, train_context: core.TrainContext):
"""Tf-Agents Ppo Implementation of the train loop."""
assert isinstance(train_context, core.PpoTrainContext)
tc: core.PpoTrainContext = train_context
train_env = self._create_env(discount=tc.reward_discount_gamma)
observation_spec = train_env.observation_spec()
action_spec = train_env.action_spec()
timestep_spec = train_env.time_step_spec()
# SetUp Optimizer, Networks and PpoAgent
self.log_api('AdamOptimizer', '()')
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=tc.learning_rate)
self.log_api('ActorDistributionNetwork', '()')
actor_net = actor_distribution_network.ActorDistributionNetwork(observation_spec, action_spec,
fc_layer_params=self.model_config.fc_layers)
self.log_api('ValueNetwork', '()')
value_net = value_network.ValueNetwork(observation_spec, fc_layer_params=self.model_config.fc_layers)
self.log_api('PpoAgent', '()')
tf_agent = ppo_agent.PPOAgent(timestep_spec, action_spec, optimizer,
actor_net=actor_net, value_net=value_net,
num_epochs=tc.num_epochs_per_iteration)
self.log_api('tf_agent.initialize', '()')
tf_agent.initialize()
self._trained_policy = tf_agent.policy
# SetUp Data collection & Buffering
collect_data_spec = tf_agent.collect_data_spec
self.log_api('TFUniformReplayBuffer', '()')
replay_buffer = TFUniformReplayBuffer(collect_data_spec,
batch_size=1, max_length=tc.max_steps_in_buffer)
collect_policy = tf_agent.collect_policy
self.log_api('DynamicEpisodeDriver', '()')
collect_driver = DynamicEpisodeDriver(train_env, collect_policy, observers=[replay_buffer.add_batch],
num_episodes=tc.num_episodes_per_iteration)
# Train
collect_driver.run = common.function(collect_driver.run, autograph=False)
tf_agent.train = common.function(tf_agent.train, autograph=False)
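# Training loop sketch: each iteration collects num_episodes_per_iteration full episodes with the
# collect policy, trains on all gathered trajectories for num_epochs_per_iteration PPO epochs,
# logs the actor/critic loss split, and clears the replay buffer before the next iteration.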
while True:
self.on_train_iteration_begin()
self.log_api('-----', f'iteration {tc.iterations_done_in_training:4} of {tc.num_iterations:<4} -----')
self.log_api('collect_driver.run', '()')
collect_driver.run()
self.log_api('replay_buffer.gather_all', '()')
trajectories = replay_buffer.gather_all()
self.log_api('tf_agent.train', '(experience=...)')
loss_info = tf_agent.train(experience=trajectories)
total_loss = loss_info.loss.numpy()
actor_loss = loss_info.extra.policy_gradient_loss.numpy()
critic_loss = loss_info.extra.value_estimation_loss.numpy()
self.log_api('', f'loss={total_loss:<7.1f} [actor={actor_loss:<7.1f} critic={critic_loss:<7.1f}]')
self.log_api('replay_buffer.clear', '()')
replay_buffer.clear()
self.on_train_iteration_end(loss=total_loss, actor_loss=actor_loss, critic_loss=critic_loss)
if tc.training_done:
break
return
# noinspection PyUnresolvedReferences
class TfRandomAgent(TfAgent):
""" creates a new random agent based on uniform random actions.
Args:
model_config: the model configuration including the name of the target gym environment
as well as the neural network architecture (unused by the random policy).
"""
def __init__(self, model_config: core.ModelConfig):
super().__init__(model_config=model_config)
self._set_trained_policy()
def _set_trained_policy(self):
"""Tf-Agents Random Implementation of the train loop."""
self.log('Creating environment...')
train_env = self._create_env()
action_spec = train_env.action_spec()
timestep_spec = train_env.time_step_spec()
self.log_api('RandomTFPolicy', 'create')
self._trained_policy = random_tf_policy.RandomTFPolicy(timestep_spec, action_spec)
self._agent_context._is_policy_trained = True
def load_implementation(self, directory: str):
"""NoOps implementation, since we don't save/load random policies."""
pass
def save_implementation(self, directory: str):
"""NoOps implementation, since we don't save/load random policies."""
pass
# noinspection DuplicatedCode
def train_implementation(self, train_context: core.TrainContext):
self.log("Training...")
train_env = self._create_env()
while True:
self.on_train_iteration_begin()
# ensure that 1 episode is played during the iteration
time_step = train_env.reset()
while not time_step.is_last():
action_step = self._trained_policy.action(time_step)
time_step = train_env.step(action_step.action)
self.on_train_iteration_end(math.nan)
if train_context.training_done:
break
return
# noinspection PyUnresolvedReferences
class TfReinforceAgent(TfAgent):
""" creates a new agent based on the Reinforce algorithm using the tfagents implementation.
Reinforce is a vanilla policy gradient algorithm using a single neural network to predict
the actions.
Args:
model_config: the model configuration including the name of the target gym environment
as well as the neural network architecture.
"""
def __init__(self, model_config: core.ModelConfig):
super().__init__(model_config=model_config)
# noinspection DuplicatedCode
def train_implementation(self, train_context: core.TrainContext):
"""Tf-Agents Reinforce Implementation of the train loop."""
assert isinstance(train_context, core.EpisodesTrainContext)
tc: core.EpisodesTrainContext = train_context
self.log('Creating environment...')
train_env = self._create_env(discount=tc.reward_discount_gamma)
observation_spec = train_env.observation_spec()
action_spec = train_env.action_spec()
timestep_spec = train_env.time_step_spec()
# SetUp Optimizer, Networks and ReinforceAgent
self.log_api('AdamOptimizer', 'create')
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=tc.learning_rate)
self.log_api('ActorDistributionNetwork', 'create')
actor_net = actor_distribution_network.ActorDistributionNetwork(observation_spec, action_spec,
fc_layer_params=self.model_config.fc_layers)
self.log_api('ReinforceAgent', 'create')
tf_agent = reinforce_agent.ReinforceAgent(timestep_spec, action_spec, actor_network=actor_net,
optimizer=optimizer)
self.log_api('tf_agent.initialize()')
tf_agent.initialize()
self._trained_policy = tf_agent.policy
# SetUp Data collection & Buffering
collect_data_spec = tf_agent.collect_data_spec
self.log_api('TFUniformReplayBuffer', 'create')
replay_buffer = TFUniformReplayBuffer(collect_data_spec, batch_size=1, max_length=tc.max_steps_in_buffer)
self.log_api('DynamicEpisodeDriver', 'create')
collect_driver = DynamicEpisodeDriver(train_env, tf_agent.collect_policy,
observers=[replay_buffer.add_batch],
num_episodes=tc.num_episodes_per_iteration)
# Train
collect_driver.run = common.function(collect_driver.run, autograph=False)
tf_agent.train = common.function(tf_agent.train, autograph=False)
self.log('Starting training...')
while True:
self.on_train_iteration_begin()
msg = f'iteration {tc.iterations_done_in_training:4} of {tc.num_iterations:<4}'
self.log_api('collect_driver.run', msg)
collect_driver.run()
self.log_api('replay_buffer.gather_all', msg)
trajectories = replay_buffer.gather_all()
self.log_api('tf_agent.train', msg)
loss_info = tf_agent.train(experience=trajectories)
total_loss = loss_info.loss.numpy()
self.log_api('', f'loss={total_loss:<7.1f}')
self.log_api('replay_buffer.clear', msg)
replay_buffer.clear()
self.on_train_iteration_end(loss=total_loss)
if tc.training_done:
break
return
# noinspection PyUnresolvedReferences
class TfSacAgent(TfAgent):
""" creates a new agent based on the SAC algorithm using the tfagents implementation.
adapted from
https://github.com/tensorflow/agents/blob/master/tf_agents/colabs/7_SAC_minitaur_tutorial.ipynb
Args:
model_config: the model configuration including the name of the target gym environment
as well as the neural network architecture.
assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
# Charge will be turned off at lambda_electrostatics_delete = 0, on at lambda_electrostatics_delete = 1; kill charge with lambda_electrostatics_delete = 0 --> 1
self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset('lambda_electrostatics_delete', particle_index, -charge, 0*sigma, 0*epsilon)
elif particle_index in self._atom_classes['unique_new_atoms']:
_logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a unique_new")
#get the parameters in the new system
new_index = hybrid_to_new_map[particle_index]
[charge, sigma, epsilon] = new_system_nonbonded_force.getParticleParameters(new_index)
#add the particle to the hybrid custom sterics and electrostatics
check_index = self._hybrid_system_forces['core_sterics_force'].addParticle([sigma, 0.0*epsilon, sigma, epsilon, 0, 1]) # turning on sterics in forward direction
assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
# Add particle to the regular nonbonded force, but Lennard-Jones will be handled by CustomNonbondedForce
check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(0.0, sigma, 0.0) #charge starts at zero
assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
# Charge will be turned off at lambda_electrostatics_insert = 0, on at lambda_electrostatics_insert = 1; add charge with lambda_electrostatics_insert = 0 --> 1
self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset('lambda_electrostatics_insert', particle_index, +charge, 0, 0)
elif particle_index in self._atom_classes['core_atoms']:
_logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a core")
#get the parameters in the new and old systems:
old_index = hybrid_to_old_map[particle_index]
[charge_old, sigma_old, epsilon_old] = old_system_nonbonded_force.getParticleParameters(old_index)
new_index = hybrid_to_new_map[particle_index]
[charge_new, sigma_new, epsilon_new] = new_system_nonbonded_force.getParticleParameters(new_index)
#add the particle to the custom forces, interpolating between the two parameters; add steric params and zero electrostatics to core_sterics per usual
check_index = self._hybrid_system_forces['core_sterics_force'].addParticle([sigma_old, epsilon_old, sigma_new, epsilon_new, 0, 0])
assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
#still add the particle to the regular nonbonded force, but with zeroed out parameters; add old charge to standard_nonbonded and zero sterics
check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge_old, 0.5*(sigma_old+sigma_new), 0.0)
assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
# Charge is charge_old at lambda_electrostatics = 0, charge_new at lambda_electrostatics = 1
# TODO: We could also interpolate the Lennard-Jones here instead of core_sterics force so that core_sterics_force could just be softcore
# interpolate between old and new charge with lambda_electrostatics core; make sure to keep sterics off
self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset('lambda_electrostatics_core', particle_index, (charge_new - charge_old), 0, 0)
#otherwise, the particle is in the environment
else:
_logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is an envronment")
#the parameters will be the same in new and old system, so just take the old parameters
old_index = hybrid_to_old_map[particle_index]
[charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)
#add the particle to the hybrid custom sterics, but the parameters don't change; electrostatics are ignored
self._hybrid_system_forces['core_sterics_force'].addParticle([sigma, epsilon, sigma, epsilon, 0, 0])
#add the environment atoms to the regular nonbonded force as well: should we be adding steric terms here, too?
self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge, sigma, epsilon)
# Now loop pairwise through (unique_old, unique_new) and add exceptions so that they never interact electrostatically (place into Nonbonded Force)
unique_old_atoms = self._atom_classes['unique_old_atoms']
unique_new_atoms = self._atom_classes['unique_new_atoms']
for old in unique_old_atoms:
for new in unique_new_atoms:
self._hybrid_system_forces['standard_nonbonded_force'].addException(old, new, 0.0*unit.elementary_charge**2, 1.0*unit.nanometers, 0.0*unit.kilojoules_per_mole)
self._hybrid_system_forces['core_sterics_force'].addExclusion(old, new) #this is only necessary to avoid the 'All forces must have identical exclusions' rule
_logger.info("\thandle_nonbonded: Handling Interaction Groups...")
self._handle_interaction_groups()
_logger.info("\thandle_nonbonded: Handling Hybrid Exceptions...")
self._handle_hybrid_exceptions()
_logger.info("\thandle_nonbonded: Handling Original Exceptions...")
self._handle_original_exceptions()
def _generate_dict_from_exceptions(self, force):
"""
This is a utility function to generate a dictionary of the form
(particle1_idx, particle2_idx) : [exception parameters]. This will facilitate access and search of exceptions
Parameters
----------
force : openmm.NonbondedForce object
a force containing exceptions
Returns
-------
exceptions_dict : dict
Dictionary of exceptions
"""
exceptions_dict = {}
for exception_index in range(force.getNumExceptions()):
[index1, index2, chargeProd, sigma, epsilon] = force.getExceptionParameters(exception_index)
exceptions_dict[(index1, index2)] = [chargeProd, sigma, epsilon]
_logger.debug(f"\t_generate_dict_from_exceptions: Exceptions Dict: {exceptions_dict}" )
return exceptions_dict
def _handle_interaction_groups(self):
"""
Create the appropriate interaction groups for the custom nonbonded forces. The groups are:
1) Unique-old - core
2) Unique-old - environment
3) Unique-new - core
4) Unique-new - environment
5) Core - environment
6) Core - core
Unique-old and Unique-new are prevented from interacting this way, and intra-unique interactions occur in an
unmodified nonbonded force.
Must be called after particles are added to the Nonbonded forces.
Note: the following interaction groups are also added at the end of this method:
7) Unique-new - Unique-new
8) Unique-old - Unique-old
"""
#get the force objects for convenience:
sterics_custom_force = self._hybrid_system_forces['core_sterics_force']
#also prepare the atom classes
core_atoms = self._atom_classes['core_atoms']
unique_old_atoms = self._atom_classes['unique_old_atoms']
unique_new_atoms = self._atom_classes['unique_new_atoms']
environment_atoms = self._atom_classes['environment_atoms']
sterics_custom_force.addInteractionGroup(unique_old_atoms, core_atoms)
sterics_custom_force.addInteractionGroup(unique_old_atoms, environment_atoms)
sterics_custom_force.addInteractionGroup(unique_new_atoms, core_atoms)
sterics_custom_force.addInteractionGroup(unique_new_atoms, environment_atoms)
sterics_custom_force.addInteractionGroup(core_atoms, environment_atoms)
sterics_custom_force.addInteractionGroup(core_atoms, core_atoms)
sterics_custom_force.addInteractionGroup(unique_new_atoms, unique_new_atoms)
sterics_custom_force.addInteractionGroup(unique_old_atoms, unique_old_atoms)
def _handle_hybrid_exceptions(self):
"""
Instead of excluding interactions that shouldn't occur, we provide exceptions for interactions that were zeroed
out but should occur.
Returns
-------
"""
old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
import itertools
#prepare the atom classes
unique_old_atoms = self._atom_classes['unique_old_atoms']
unique_new_atoms = self._atom_classes['unique_new_atoms']
#get the list of interaction pairs for which we need to set exceptions:
unique_old_pairs = list(itertools.combinations(unique_old_atoms, 2))
unique_new_pairs = list(itertools.combinations(unique_new_atoms, 2))
#add back the interactions of the old unique atoms, unless there are exceptions
for atom_pair in unique_old_pairs:
#since the pairs are indexed in the dictionary by the old system indices, we need to convert
old_index_atom_pair = (self._hybrid_to_old_map[atom_pair[0]], self._hybrid_to_old_map[atom_pair[1]])
#now we check if the pair is in the exception dictionary
if old_index_atom_pair in self._old_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {old_index_atom_pair} is an old system exception")
[chargeProd, sigma, epsilon] = self._old_system_exceptions[old_index_atom_pair]
if self._interpolate_14s: #if we are interpolating 1,4 exceptions, add the exception with zeroed parameters (the interpolation is handled elsewhere)
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # add exclusion to ensure exceptions are consistent
#check if the pair is in the reverse order and use that if so
elif old_index_atom_pair[::-1] in self._old_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {old_index_atom_pair[::-1]} is an old system exception")
[chargeProd, sigma, epsilon] = self._old_system_exceptions[old_index_atom_pair[::-1]]
if self._interpolate_14s: #if we are interpolating 1,4 exceptions, add the exception with zeroed parameters (the interpolation is handled elsewhere)
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # add exclusion to ensure exceptions are consistent
#If it's not handled by an exception in the original system, we just add the regular parameters as an exception
# TODO: this implies that the old-old nonbonded interactions (those which are not exceptions) are always self-interacting throughout lambda protocol...
# else:
# _logger.info(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {old_index_atom_pair} is NOT an old exception...perhaps this is a problem!")
# [charge0, sigma0, epsilon0] = self._old_system_forces['NonbondedForce'].getParticleParameters(old_index_atom_pair[0])
# [charge1, sigma1, epsilon1] = self._old_system_forces['NonbondedForce'].getParticleParameters(old_index_atom_pair[1])
# chargeProd = charge0*charge1
# epsilon = unit.sqrt(epsilon0*epsilon1)
# sigma = 0.5*(sigma0+sigma1)
# self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
# self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # add exclusion to ensure exceptions are consistent
#add back the interactions of the new unique atoms, unless there are exceptions
for atom_pair in unique_new_pairs:
#since the pairs are indexed in the dictionary by the new system indices, we need to convert
new_index_atom_pair = (self._hybrid_to_new_map[atom_pair[0]], self._hybrid_to_new_map[atom_pair[1]])
#now we check if the pair is in the exception dictionary
if new_index_atom_pair in self._new_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {new_index_atom_pair} is a new system exception")
[chargeProd, sigma, epsilon] = self._new_system_exceptions[new_index_atom_pair]
if self._interpolate_14s:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1])
#check if the pair is present in the reverse order and use that if so
elif new_index_atom_pair[::-1] in self._new_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {new_index_atom_pair[::-1]} is a new system exception")
[chargeProd, sigma, epsilon] = self._new_system_exceptions[new_index_atom_pair[::-1]]
if self._interpolate_14s:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1])
#If it's not handled by an exception in the original system, we just add the regular parameters as an exception
# else:
# _logger.info(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {new_index_atom_pair} is NOT a new exception...perhaps this is a problem!")
# [charge0, sigma0, epsilon0] = self._new_system_forces['NonbondedForce'].getParticleParameters(new_index_atom_pair[0])
# [charge1, sigma1, epsilon1] = self._new_system_forces['NonbondedForce'].getParticleParameters(new_index_atom_pair[1])
# chargeProd = charge0*charge1
# epsilon = unit.sqrt(epsilon0*epsilon1)
# sigma = 0.5*(sigma0+sigma1)
# self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
# self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # add exclusion to ensure exceptions are consistent
def _handle_original_exceptions(self):
"""
This method ensures that exceptions present in the original systems are present in the hybrid appropriately.
"""
#get what we need to find the exceptions from the new and old systems:
old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
hybrid_to_old_map = {value: key for key, value in self._old_to_hybrid_map.items()}
hybrid_to_new_map = {value: key for key, value in self._new_to_hybrid_map.items()}
# Repository: 0u812/roadrunner
# Test Module for RoadRunner
#
# Usage:
# import rrTester
# runTester (pathtoModelFile, modelFileName)
#-------------------------------------------------------------
# Tests for steady state and stoichiometric calculations in
# roadRunner. <NAME> November 2012
# Nov 2013: Modified to test Andy's SWIG API
#-------------------------------------------------------------
#------------------------------------------
# Change this line for different test files
#nameOfResultsFile = 'results_roadRunnerTest_1.txt'
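# Expected layout of the results file (an assumption inferred from the reader functions below, not a spec):
#   [SBML]    ...model text...    [END_MODEL]
#   [JARNAC]  ...model text...    [END_MODEL]
#   [Test Section Name]           expected values, one line per row; blank lines and '#' lines are ignored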
import sys
import random
import string
import roadrunner
from numpy import *
import os
# Module wide file handle
fHandle = ''
rpadding = 45
sbmlStr = ''
JarnacStr = ''
def defaultTestFilePath():
"""
get the full path of the default data file
"""
me = os.path.realpath(__file__)
base = os.path.split(me)[0]
testfile = os.path.join(base, 'results_roadRunnerTest_1.txt')
if os.path.isfile(testfile):
return testfile
else:
raise Exception('installation error, test file, ' + testfile + ' does not exist')
# --------------------------------------------------------------------------
# SUPPORT ROUTINES
# --------------------------------------------------------------------------
def expectApproximately (a, b, tol):
diff = a - b
return abs(diff) < tol
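# e.g. expectApproximately(1.0000001, 1.0, 1E-6) -> True; expectApproximately(1.0005, 1.0, 1E-6) -> False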
def passMsg (errorFlag):
if errorFlag:
return "*****FAIL*****"
else:
return "PASS"
# Empty lines are ignored
# Lines starting with # are also ignored
def readLine ():
line = fHandle.readline()
while line == '\n':
line = fHandle.readline()
while line == '':
line = fHandle.readline()
while (line[0] == '#') or (line == '') or (line[0] == '\n'):
if line == '':
return line
line = fHandle.readline();
return line.strip('\n')
def jumpToNextTest():
line = readLine()
#line = ''
#while line == '':
# line = fHandle.readline().strip ('\n')
while line[0] != '[':
line = readLine()
return line
def getSBMLStr ():
sbmlStr = ''
line = fHandle.readline()
while (line != '[END_MODEL]' + '\n'):
sbmlStr = sbmlStr + line
line = fHandle.readline()
return sbmlStr
def getJarnacStr ():
JarnacStr = ''
line = fHandle.readline()
while (line != '[END_MODEL]' + '\n'):
JarnacStr = JarnacStr + line
line = fHandle.readline()
return JarnacStr
def loadSBMLModelFromTestFile ():
testId = jumpToNextTest()
if testId == '[SBML]':
return getSBMLStr ()
def loadJarnacModelFromTestFile ():
testId = jumpToNextTest ()
if testId == '[JARNAC]':
return getJarnacStr ()
# ------------------------------------------------------------------------
# TESTS START HERE
# ------------------------------------------------------------------------
def setConservationLaw(rrInstance, testId):
line = readLine ()
if line == 'True':
rrInstance.conservedMoietyAnalysis = True
else:
rrInstance.conservedMoietyAnalysis = False
def mySetSteadyStateSelectionList(rrInstance, testId):
line = readLine ()
words = line.split()
rrInstance.steadyStateSelections = words
def myComputeSteadyState(rrInstance, testId):
line = readLine ()
if line == "True":
print "Compute Steady State, distance to SteadyState:", rrInstance.steadyState()
def checkSpeciesConcentrations(rrInstance, testId):
words = []
species = []
m = rrInstance.model.getNumFloatingSpecies()
for i in range (0,m):
line = readLine ()
words = line.split()
words.append (rrInstance.model[words[0]])
species.append (words)
# Steady State Concentrations
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
for i in range (0,m):
expectedValue = float (species[i][1])
if expectApproximately (expectedValue, species[i][2], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkFluxes(rrInstance, testId):
words = []
fluxes = []
# Steady State Fluxes
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
n = rrInstance.model.getNumReactions();
for i in range (0,n):
line = readLine ()
words = line.split()
words.append (rrInstance.model[words[0]])
fluxes.append (words)
for i in range (0,n):
expectedValue = float (fluxes[i][1])
if expectApproximately (expectedValue, fluxes[i][2], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkFullJacobian(rrInstance, testId):
# Jacobian
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
Jacobian = rrInstance.getFullJacobian()
for i in range(0,m):
line = readLine ()
words = line.split()
for j in range(0,m):
expectedValue = float(words[j])
if expectApproximately (expectedValue, Jacobian[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkIndividualEigenvalues(rrInstance, testId):
# Eigenvalues
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
try:
for i in range(0,m):
line = readLine ()
words = line.split()
eigenvalueName = words[0]
realPart = rrInstance.getValue ('eigen(' + eigenvalueName + ')')
realPart = float (realPart)
if expectApproximately (realPart, float(words[1]), 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
except Exception, e:
print('Unexpected error in checkIndividualEigenvalues:' + str(e))
def checkEigenvalueMatrix(rrInstance, testId):
# Eigenvalues
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
eigenvalues = rrInstance.getEigenvalues()
for i in range(0,m):
line = readLine ()
words = line.split()
realPart = float (words[0])
# Check if there is an imaginary part
if len (words) == 1:
imagPart = 0
else:
imagPart= float (words[1])
if (expectApproximately (realPart, eigenvalues[i,0], 1E-6) == False) or (expectApproximately (imagPart, eigenvalues[i,1], 1E-6)) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkStoichiometryMatrix(rrInstance, testId):
# Stoichiometry matrix
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
n = rrInstance.model.getNumReactions();
st = rrInstance.model.getStoichiometryMatrix()
for i in range(0,m):
line = readLine ()
words = line.split()
for j in range(0,n):
if expectApproximately(float (words[j]), st[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkLinkMatrix(rrInstance, testId):
# Link matrix
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
st = rrInstance.getLinkMatrix()
for i in range(0,m):
words = readLine ().split()
for j in range(0,m):
if expectApproximately(float (words[j]), st[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkUnscaledConcentrationControlMatrix(rrInstance, testId):
# Unscaled Concentration Control matrix
print string.ljust ("Check " + testId, rpadding),
words = []
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
n = rrInstance.model.getNumReactions();
st = rrInstance.getUnscaledConcentrationControlCoefficientMatrix();
for i in range(0,m):
words = readLine ().split()
for j in range(0,n):
if expectApproximately(float (words[j]), st[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkScaledConcentrationControlMatrix(rrInstance, testId):
# Scaled Concentration Control matrix
print string.ljust ("Check " + testId, rpadding),
words = []
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
n = rrInstance.model.getNumReactions();
st = rrInstance.getScaledConcentrationControlCoefficientMatrix();
for i in range(0,m):
words = readLine ().split()
for j in range(0,n):
if expectApproximately(float (words[j]), st[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkUnscaledFluxControlCoefficientMatrix(rrInstance, testId):
# Unscaled Flux Control matrix
print string.ljust ("Check " + testId, rpadding),
words = []
errorFlag = False
n = rrInstance.model.getNumReactions();
st = rrInstance.getUnscaledFluxControlCoefficientMatrix();
for i in range(0,n):
words = readLine ().split()
for j in range(0,n):
if expectApproximately(float (words[j]), st[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkScaledFluxControlCoefficientMatrix(rrInstance, testId):
# Scaled Flux Control matrix
print string.ljust ("Check " + testId, rpadding),
words = []
errorFlag = False
n = rrInstance.model.getNumReactions();
st = rrInstance.getScaledFluxControlCoefficientMatrix()
for i in range(0,n):
words = readLine ().split()
for j in range(0,n):
if expectApproximately(float (words[j]), st[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkUnscaledElasticityMatrix(rrInstance, testId):
# Unscaled elasticity matrix
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
uee = rrInstance.getUnscaledElasticityMatrix()
for i in range(0,m):
line = readLine ()
words = line.split()
for j in range(0,m):
expectedValue = float(words[j])
if expectApproximately (expectedValue, uee[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkScaledElasticityMatrix(rrInstance, testId):
# Scaled elasticity matrix
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
m = rrInstance.model.getNumFloatingSpecies()
ee = rrInstance.getScaledElasticityMatrix()
for i in range(0,m):
line = readLine ()
words = line.split()
for j in range(0,m):
expectedValue = float(words[j])
if expectApproximately (expectedValue, ee[i,j], 1E-6) == False:
errorFlag = True
break
print passMsg (errorFlag)
def checkGetFloatingSpeciesIds(rrInstance, testId):
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
line = readLine ()
words = line.split()
expected = rrInstance.model.getFloatingSpeciesIds()
m = rrInstance.model.getNumFloatingSpecies()
for i in range(0,m):
if words[i] != expected[i]:
errorFlag = True
break
print passMsg (errorFlag)
def checkGetBoundarySpeciesIds(rrInstance, testId):
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
line = readLine ()
words = line.split()
expected = rrInstance.model.getBoundarySpeciesIds()
m = rrInstance.model.getNumBoundarySpecies()
for i in range(0,m):
if words[i] != expected[i]:
errorFlag = True
break
print passMsg (errorFlag)
def checkGetGlobalParameterIds (rrInstance, testId):
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
line = readLine ()
words = line.split()
expected = rrInstance.model.getGlobalParameterIds()
m = rrInstance.model.getNumGlobalParameters()
for i in range(0,m):
if words[i] != expected[i]:
errorFlag = True
break
print passMsg (errorFlag)
def checkGetCompartmentIds (rrInstance, testId):
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
line = readLine ()
words = line.split()
expected = rrInstance.model.getCompartmentIds()
m = rrInstance.model.getNumCompartments()
for i in range(0,m):
if words[i] != expected[i]:
errorFlag = True
break
print passMsg (errorFlag)
def checkReactionIds (rrInstance, testId):
print string.ljust ("Check " + testId, rpadding),
errorFlag = False
line = readLine ()
words = line.split()
expected = rrInstance.model.getReactionIds()
m = rrInstance.model.getNumReactions();
for i in range(0,m):
if words[i] != expected[i]:
errorFlag = True
break
print passMsg (errorFlag)
def checkFloatingSpeciesInitialConditionIds (rrInstance, testId):
print string.ljust ("Check " + testId, rpadding),
# Repository: bmdepesa/validation-tests
from common_fixtures import * # NOQA
shared_services = []
@pytest.fixture(scope='session', autouse=True)
def create_services_for_selectors(request, client, admin_client):
labels = [{"c1": "value1"}, {"c1": "value2"}, {"c2": "value1"},
{"c2": "value2"}, {"c2": "value3"},
{"c2": "value4", "c1": "value3"},
{"c2": "value4", "c1": "value4"}]
env = create_env(client)
for label in labels:
launch_config = {"imageUuid": WEB_IMAGE_UUID,
"labels": label}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service = client.wait_success(service, 60)
shared_services.append(service)
def fin():
delete_all(client, shared_services)
request.addfinalizer(fin)
def env_with_service_selectorContainer(admin_client, client, label):
launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
# Create Environment
env = create_env(client)
# Create Service
random_name = random_str()
service_name = random_name.replace("-", "")
service = client.create_service(
name=service_name, environmentId=env.id,
launchConfig=launch_config_svc, scale=2,
selectorContainer=label["name"]+"="+label["value"])
service = client.wait_success(service)
assert service.state == "inactive"
service.activate()
service = client.wait_success(service)
assert service.state == "active"
c = client.create_container(name=random_str(),
networkMode=MANAGED_NETWORK,
imageUuid=WEB_IMAGE_UUID,
labels={label["name"]: label["value"]}
)
c = client.wait_success(c)
containers = get_service_container_list(admin_client, service, managed=0)
assert len(containers) == 1
assert containers[0].id == c.id
return env, service, c
def create_env_with_svc_options(client, launch_config_svc,
scale_svc, metadata=None,
selectorLink=None,
selectorContainer=None):
# Create Environment
env = create_env(client)
# Create Service
random_name = random_str()
service_name = random_name.replace("-", "")
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=scale_svc,
metadata=metadata,
selectorLink=selectorLink,
selectorContainer=selectorContainer)
return env, service
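# Note on selectors (as exercised by the tests below; behaviour inferred from these tests):
# selectorLink makes a service, LB service or DNS service link itself to services whose containers carry
# the matching label, while selectorContainer makes a service adopt standalone containers carrying the
# matching label as (unmanaged) instances of the service.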
def test_selectorLink(admin_client, client):
port = "4000"
launch_config = {"imageUuid": WEB_IMAGE_UUID,
"labels": {"test1": "bar"}}
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
env, service = create_env_with_svc_options(client, launch_config_svc,
2, selectorLink="test1=bar")
linked_service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service = client.wait_success(linked_service)
assert linked_service.state == "inactive"
env = env.activateservices()
service = client.wait_success(service, 300)
assert service.state == "active"
linked_service = client.wait_success(linked_service, 300)
assert linked_service.state == "active"
validate_linked_service(admin_client, service, [linked_service], port)
delete_all(client, [env])
def test_selectorLink_lbservice(admin_client, client, socat_containers):
port = "4001"
launch_config = {"imageUuid": WEB_IMAGE_UUID,
"labels": {"test2": "bar"}}
launch_config_lb = {"ports": port+":80"}
env = create_env(client)
lb_service = client.create_loadBalancerService(
name="lb-1",
environmentId=env.id,
launchConfig=launch_config_lb,
scale=1, selectorLink="test2=bar")
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
linked_service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service1 = client.wait_success(linked_service1)
assert linked_service1.state == "inactive"
linked_service2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service2 = client.wait_success(linked_service2)
assert linked_service2.state == "inactive"
linked_service1.activate()
linked_service2.activate()
lb_service.activate()
linked_service1 = client.wait_success(linked_service1, 300)
assert linked_service1.state == "active"
linked_service2 = client.wait_success(linked_service2, 300)
assert linked_service2.state == "active"
lb_service = client.wait_success(lb_service, 300)
assert lb_service.state == "active"
wait_for_lb_service_to_become_active(admin_client, client,
[linked_service1, linked_service2],
lb_service)
validate_lb_service(admin_client, client, lb_service, port,
[linked_service1, linked_service2])
delete_all(client, [env])
def test_selectorLink_dnsservice(admin_client, client):
port = "4002"
launch_config = {"imageUuid": WEB_IMAGE_UUID,
"labels": {"test3": "bar"}}
client_launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
env = create_env(client)
dns = client.create_dnsService(
name="dns-1",
environmentId=env.id,
scale=1, selectorLink="test3=bar")
dns = client.wait_success(dns)
assert dns.state == "inactive"
linked_service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service1 = client.wait_success(linked_service1)
assert linked_service1.state == "inactive"
linked_service2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service2 = client.wait_success(linked_service2)
assert linked_service2.state == "inactive"
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=client_launch_config_svc,
scale=1)
service = client.wait_success(service)
link_svc(admin_client, service, [dns])
service.activate()
linked_service1.activate()
linked_service2.activate()
dns.activate()
linked_service1 = client.wait_success(linked_service1, 300)
assert linked_service1.state == "active"
linked_service2 = client.wait_success(linked_service2, 300)
assert linked_service2.state == "active"
dns = client.wait_success(dns, 300)
assert dns.state == "active"
validate_dns_service(
admin_client, service, [linked_service1, linked_service2], port,
dns.name)
delete_all(client, [env])
def test__selectorLink_tolinkto_dnsservice(admin_client, client):
port = "4003"
launch_config = {"imageUuid": WEB_IMAGE_UUID,
"labels": {"test5": "bar"}}
client_launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"],
"labels": {"dns": "mydns"}}
dns_launch_config = {"labels": {"dns": "mydns"}}
env = create_env(client)
dns = client.create_dnsService(
name="dns-1",
environmentId=env.id,
scale=1, selectorLink="test5=bar",
launchConfig=dns_launch_config)
dns = client.wait_success(dns)
assert dns.state == "inactive"
linked_service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service1 = client.wait_success(linked_service1)
assert linked_service1.state == "inactive"
linked_service2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
linked_service2 = client.wait_success(linked_service2)
assert linked_service2.state == "inactive"
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=client_launch_config_svc,
selectorLink="dns=mydns",
scale=1)
service = client.wait_success(service)
assert service.state == "inactive"
service.activate()
linked_service1.activate()
linked_service2.activate()
dns.activate()
linked_service1 = client.wait_success(linked_service1, 300)
assert linked_service1.state == "active"
linked_service2 = client.wait_success(linked_service2, 300)
assert linked_service2.state == "active"
dns = client.wait_success(dns, 300)
assert dns.state == "active"
validate_dns_service(
admin_client, service, [linked_service1, linked_service2], port,
dns.name)
delete_all(client, [env])
def test_selectorContainer_service_link(admin_client, client):
port = "5000"
labels = {}
labels["name"] = "testc1"
labels["value"] = "bar"
env, consumed_service, c = env_with_service_selectorContainer(
admin_client, client, labels)
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
# Create Service
random_name = random_str()
service_name = random_name.replace("-", "")
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=1)
service = client.wait_success(service)
assert service.state == "inactive"
service.activate()
service.addservicelink(serviceLink={"serviceId": consumed_service.id})
service = client.wait_success(service, 120)
consumed_service = client.wait_success(consumed_service, 120)
assert service.state == "active"
assert consumed_service.state == "active"
validate_add_service_link(admin_client, service, consumed_service)
unmanaged_con = {}
unmanaged_con[consumed_service.id] = [c]
validate_linked_service(admin_client, service, [consumed_service], port,
unmanaged_cons=unmanaged_con)
delete_all(client, [env, c])
def test_selectorContainer_dns(admin_client, client):
port = "4010"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
launch_config_consumed_svc = {"imageUuid": WEB_IMAGE_UUID}
# Create Environment for dns service and client service
env = create_env(client)
c1 = client.create_container(name=random_str(),
networkMode=MANAGED_NETWORK,
imageUuid=WEB_IMAGE_UUID,
labels={"dns1": "value1"}
)
c1 = client.wait_success(c1)
c2 = client.create_container(name=random_str(),
networkMode=MANAGED_NETWORK,
imageUuid=WEB_IMAGE_UUID,
labels={"dns2": "value2"}
)
c2 = client.wait_success(c2)
random_name = random_str()
service_name = random_name.replace("-", "")
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=1)
service = client.wait_success(service)
assert service.state == "inactive"
random_name = random_str()
service_name = random_name.replace("-", "")
consumed_service = client.create_service(
name=service_name, environmentId=env.id,
launchConfig=launch_config_consumed_svc, scale=2,
selectorContainer="dns1=value1")
consumed_service = client.wait_success(consumed_service)
assert consumed_service.state == "inactive"
random_name = random_str()
service_name = random_name.replace("-", "")
consumed_service1 = client.create_service(
name=service_name, environmentId=env.id,
launchConfig=launch_config_consumed_svc, scale=2,
selectorContainer="dns2=value2")
consumed_service1 = client.wait_success(consumed_service1)
assert consumed_service1.state == "inactive"
# Create DNS service
dns = client.create_dnsService(name='WEB1',
environmentId=env.id)
dns = client.wait_success(dns)
env.activateservices()
service.addservicelink(serviceLink={"serviceId": dns.id})
dns.addservicelink(serviceLink={"serviceId": consumed_service.id})
dns.addservicelink(serviceLink={"serviceId": consumed_service1.id})
service = client.wait_success(service, 120)
consumed_service = client.wait_success(consumed_service, 120)
consumed_service1 = client.wait_success(consumed_service1, 120)
dns = client.wait_success(dns, 120)
assert service.state == "active"
assert consumed_service.state == "active"
assert consumed_service1.state == "active"
unmanaged_con = {}
unmanaged_con[consumed_service.id] = [c1]
unmanaged_con[consumed_service1.id] = [c2]
validate_dns_service(
admin_client, service, [consumed_service, consumed_service1], port,
dns.name, unmanaged_cons=unmanaged_con)
delete_all(client, [env, c1, c2])
def test_selectorContainer_lb(admin_client, client, socat_containers):
port = "9011"
service_scale = 2
lb_scale = 1
launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
launch_config_lb = {"ports": [port+":80"]}
c1 = client.create_container(name=random_str(),
networkMode=MANAGED_NETWORK,
imageUuid=WEB_IMAGE_UUID,
labels={"web1": "lb"}
)
c1 = client.wait_success(c1)
c2 = client.create_container(name=random_str(),
networkMode=MANAGED_NETWORK,
imageUuid=WEB_IMAGE_UUID,
labels={"web2": "lb"}
)
c2 = client.wait_success(c2)
# Create Environment
env = create_env(client)
# Create Service1
random_name = random_str()
service_name = random_name.replace("-", "")
service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=service_scale,
selectorContainer="web1=lb"
)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
# Create Service2
random_name = random_str()
service_name = random_name.replace("-", "")
service2 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=service_scale,
selectorContainer="web2=lb"
)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
# Create LB Service
random_name = random_str()
service_name = "LB-" + random_name.replace("-", "")
lb_service = client.create_loadBalancerService(
name=service_name,
environmentId=env.id,
launchConfig=launch_config_lb,
scale=lb_scale)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
service1.activate()
service2.activate()
lb_service.activate()
service_link = {"serviceId": service1.id}
lb_service.addservicelink(serviceLink=service_link)
service_link = {"serviceId": service2.id}
lb_service.addservicelink(serviceLink=service_link)
service1 = client.wait_success(service1, 180)
service2 = client.wait_success(service2, 180)
lb_service = client.wait_success(lb_service, 180)
assert service1.state == "active"
assert service2.state == "active"
assert lb_service.state == "active"
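# c1/c2 were created with labels matching service1/service2's selectors, so they
# are picked up as unmanaged containers; the LB validation identifies them by
# Docker short ID (the first 12 characters of externalId).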
unmanaged_con = {}
unmanaged_con[service1.id] = [c1.externalId[:12]]
unmanaged_con[service2.id] = [c2.externalId[:12]]
wait_for_lb_service_to_become_active(admin_client, client,
[service1, service2], lb_service,
unmanaged_con_count=2)
validate_lb_service(admin_client, client, lb_service, port,
[service1, service2], unmanaged_cons=unmanaged_con)
delete_all(client, [env, c1, c2])
def test_selectorContainer_no_image_with_lb(
admin_client, client, socat_containers):
port = "9012"
lb_scale = 1
launch_config_svc = {"imageUuid": "docker:rancher/none"}
launch_config_lb = {"ports": [port+":80"]}
# Create Environment
env = create_env(client)
# Create Service1
random_name = random_str()
service_name = random_name.replace("-", "")
service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=0,
selectorContainer="web1=lbn"
)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
# Create Service2
random_name = random_str()
service_name = random_name.replace("-", "")
service2 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=0,
selectorContainer="web2=lbn"
)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
# Create LB Service
random_name = random_str()
service_name = "LB-" + random_name.replace("-", "")
lb_service = client.create_loadBalancerService(
name=service_name,
environmentId=env.id,
launchConfig=launch_config_lb,
scale=lb_scale)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
service1.activate()
service2.activate()
lb_service.activate()
service_link = {"serviceId": service1.id}
lb_service.addservicelink(serviceLink=service_link)
service_link = {"serviceId": service2.id}
lb_service.addservicelink(serviceLink=service_link)
service1 = client.wait_success(service1, 180)
service2 = client.wait_success(service2, 180)
lb_service = client.wait_success(lb_service, 180)
assert service1.state == "active"
assert service2.state == "active"
assert lb_service.state == "active"
wait_for_lb_service_to_become_active(admin_client, client,
[service1, service2], lb_service)
service3 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig={"imageUuid": WEB_IMAGE_UUID,
"labels": {"web1": "lbn"}},
scale=1)
service3 = client.wait_success(service3)
assert service3.state == "inactive"
service3 = client.wait_success(service3.activate(), 60)
service4 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig={"imageUuid": WEB_IMAGE_UUID,
"labels": {"web2": "lbn"}},
scale=1)
service4 = client.wait_success(service4)
assert service4.state == "inactive"
service4 = client.wait_success(service4.activate(), 60)
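# The containers of service3/service4 carry labels matching service1/service2's
# selectors, so they are expected to appear as unmanaged containers of the
# selector-only services behind the LB.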
unmanaged_con = {}
unmanaged_con[service1.id] = get_container_names_list(
admin_client, [service3])
unmanaged_con[service2.id] = get_container_names_list(
admin_client, [service4])
wait_for_lb_service_to_become_active(admin_client, client,
[service1, service2], lb_service,
unmanaged_con_count=2)
validate_lb_service(admin_client, client, lb_service, port,
[service1, service2], unmanaged_cons=unmanaged_con)
delete_all(client, [env])
def test_selectorContainer_for_service_reconciliation_on_stop(
admin_client, client, socat_containers):
labels = {}
labels["name"] = "testc2"
labels["value"] = "bar"
env, service, c = env_with_service_selectorContainer(
admin_client, client, labels)
# Stop 2 containers of the service
assert service.scale > 1
containers = get_service_container_list(admin_client, service, managed=1)
assert len(containers) == service.scale
assert service.scale > 1
container1 = containers[0]
| |
# the old stack counter for dead code elimination
stackRestore = self.stackCounter
# keep track of old assembly so that dead code can be removed
asmrestore = len(self.asm)
# declaration datatype
t = self.checkForType()
# ensure that non-valuetype structures cannot be declared in registers
if(not (isIntrinsic(t.name)) and t.ptrdepth == 0 and register):
throw(RegsiterStructure(self.current_token))
dtok = self.current_token
# get either the name, or the first name
name = self.checkForId()
# check if variable exists already
if(self.getVariable(name) is not None and not self.getVariable(name).glob):
throw(VariableRedeclaration(self.tokens[self.ctidx - 1], name))
# check if varname is a datatype
if(self.compiler.getType(name) is not None):
throw(UsingTypenameAsVariable(self.tokens[self.ctidx - 1]))
# build a variable
var = self.constructVar(t, name, register)
var.dtok = dtok
# if the variable is a stack-based structure,
# add its member variables too.
if(not var.isptr and var.t.members is not None):
self.buildStackStructure(var)
self.stackCounter += 8
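# sizes is seeded with 1 so that product(sizes) yields the declared element
# count once the per-dimension sizes are appended below; isarr flags whether
# any '[' dimension was actually parsed.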
sizes = [1]
isarr = False
# if it is a stack-based structure, and it has a destructor
# add its destructor to the end of the function
if(not var.t.isintrinsic()) and (var.t.function_template is None and var.t.ptrdepth == 0):
explicitConstructor = False
# check for explicit constructor
if (self.current_token.tok == T_OPENP):
explicitConstructor = True
self.advance()
types = [var.t.up()] + self.determineFnCallParameterTypes()
# implicit constructor
else:
types = [var.t.up()]
# find Function object of constructor, and call it if able /
# necessary
constructor = var.t.getConstructor(types)
if constructor is not None:
if explicitConstructor:
self.ctidx -= 2
self.memberCall(constructor, var, False)
self.addline("pop rax")
self.advance()
else:
if self.memberfn:
self.addline(f"push rdi")
self.addline(f"lea rdi, [rbp-{var.offset+var.t.s}]\n")
self.addline(fncall(constructor))
if self.memberfn:
self.addline(f"pop rdi")
elif not explicitConstructor:
pass
else:
throw(UnkownConstructor(self.tokens[self.ctidx - 1]))
autoArrsize = False
# check for stack based array declaration
while self.current_token.tok == "[":
isarr = True
self.advance()
if self.current_token.tok == "]":
autoArrsize = True
self.advance()
break
# collect tokens for a constexpr that will be the size of the array
exprtokens = [self.current_token]
self.advance()
while self.current_token.tok != T_CLSIDX:
exprtokens.append(self.current_token)
self.advance()
# evaluate for size
size = determineConstexpr(False, exprtokens, self)
# ensure that the size is not a variable
if isinstance(size.accessor, Variable):
throw(ExpectedToken(self.current_token, "constexpr"))
sizes.append(size.accessor)
self.checkTok(T_CLSIDX)
# stack arrays:
if(isarr):
# build properties:
totalsize = (product(sizes) - 1) * t.csize()
# stack size
var.stackarrsize = totalsize
var.isStackarr = True
# sizes plural for multi-dimensional arrays
var.stacksizes = sizes
# extra specification
var.t.stackarr = True
# update stackCounter
self.stackCounter += totalsize
# check for same-line assignment, or not
if(self.current_token.tok == T_ENDL):
self.advance()
return
# multidecl
elif (self.current_token.tok == T_COMMA):
dests = [var]
# parse destinations
while self.current_token.tok == T_COMMA:
self.advance()
xname = self.checkForId()
xvar = self.constructVar(t, xname, register)
# assignment is optimized out in oplvl3
if xvar.name not in self.unreferenced:
dests.append(xvar)
# declaration only
if (self.current_token.tok == T_ENDL):
self.advance()
return
# multi-assignment
elif self.current_token.tok != T_EQUALS:
throw(ExpectedToken(self.current_token, "="))
self.advance()
# get assignment value
instr, value = self.evaluateExpression()
# custom evaluator for multi-assignment
evaluator = ExpressionEvaluator(self)
# template postfix for assignment (Replace None with destination)
pfix = [
None, value, EC.ExpressionComponent(
"=", VOID, isoperation=True)]
if (isinstance(value, Variable)
and value.register is None) or not value.isRegister():
# load value to register if not already in one, for faster assignment to multiple
# locations
reg = ralloc(t.isflt())
instr += loadToReg(reg, value.accessor)
value.accessor = reg
value.type = t
# loop through destinations and perform assignment
for v in dests:
# fill in pfix template
pfix[0] = EC.ExpressionComponent(v, v.t)
# evaluate expression
loadInstr, _ = evaluator.evaluatePostfix(pfix, evaluator)
instr += loadInstr
# cleanup
rfree(_.accessor)
# final cleanup
rfree(value.accessor)
self.addline(instr)
self.checkSemi()
return
if(self.current_token.tok != T_EQUALS):
throw(ExpectedToken(self.current_token, " = or ; "))
self.advance()
# normal inline assignment
if(not isarr):
# move back for complete expression
self.ctidx -= 3
self.advance()
# record asm state in case this turns out to be dead code
instr, __ = self.evaluateExpression(destination=False)
if var.name not in self.unreferenced:
self.addline(instr)
else:
# if this is dead code, remove all the assembly and delete the
# variable
self.asm = self.asm[:asmrestore]
self.popVar()
self.stackCounter = stackRestore
rfree(__.accessor)
var.referenced = False
var.refcount = 0
# array assignment
else:
# itervar is used to iterate over the contents of the array
# in order to place values in indexes.
itervar = Variable(var.t, var.name, isStackarr=True)
itervar.offset = var.offset
itervar.stackarrsize = var.stackarrsize
# track automatic length assignment
autolen = 0
# set literal is used
if(self.current_token.tok == T_OPENSCOPE):
# find the bounds of the literal
startok = self.ctidx
self.skipBody()
endtok = self.ctidx + 1
# load to a list
setval = buildConstantSet(
var.t.isflt(), self.tokens[startok:endtok], self)
# check for size mismatch
if not autoArrsize and (len(setval.accessor) != sizes[1]):
throw(SetLiteralSizeMismatch(self.tokens[startok]))
if autoArrsize:
itervar.offset += (len(setval.accessor) -
1) * var.t.csize()
#var.offset = itervar.offset
# load values
self.advance()
for value in setval.accessor:
if not typematch(value.type, var.t, False):
throw(TypeMismatch(value.token, value.type, var.t))
if isinstance(value.accessor, int):
if(var.t.isfltarr()):
self.addline(
loadToReg(
itervar, floatTo64h(
value.accessor)))
else:
self.addline(loadToReg(itervar, value.accessor))
elif isinstance(value.accessor, Variable) and not (value.accessor.name.startswith("__LC.F")):
self.addline(
loadToReg(
itervar, value.accessor
)
)
else:
self.addline(
loadToReg(
'rax', floatTo64h(
value.accessor.initializer)))
self.addline(
loadToReg(itervar, 'rax')
)
itervar.offset -= var.t.csize()
autolen = (len(setval.accessor) - 1) * var.t.csize()
elif self.current_token.tok == T_ID and \
"__LC.S" in self.current_token.value:
# setup for string packing
# (Taking a char* and turning it into multiple big numbers)
v = self.getVariable(self.current_token.value)
if v is None:
throw(UnkownIdentifier(self.current_token))
self.advance()
content = v.initializer[1:-1]
if not autoArrsize:
offset = var.offset + var.stackarrsize
else:
offset = var.offset + len(content)
var.offset = offset
content = eval(f'"{content}"')
longs, ints, shorts, chars = pack_string(var, content)
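# pack_string splits the literal into 8-, 4-, 2- and 1-byte chunks; the loops
# below store each chunk at a descending stack offset so the bytes end up
# contiguous in the array's stack slot.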
for l in longs:
self.addline(
f"mov rax, {l}\nmov qword[rbp-{offset}], rax\n"
)
offset -= 8
for i in ints:
self.addline(
f"mov dword[rbp-{offset}], {i}\n"
)
offset -= 4
for s in shorts:
self.addline(
f"mov word[rbp-{offset}], {s}\n"
)
offset -= 2
for c in chars:
self.addline(
f"mov byte[rbp-{offset}], {c}\n"
)
offset -= 1
autolen = len(content)
# single value to fill across the array
else:
# check for redundant auto array declaration
if autoArrsize:
throw(AutoArrsizeForSingle(self.current_token))
# evaluate the new value
evaluation, value = self.evaluateExpression()
self.addline(evaluation)
# determine value to load into memory
loadval = value.accessor if isinstance(
value.accessor,
int) else (
floatTo64h(
value.accessor) if isinstance(
value.accessor,
float) else floatTo64h(
value.accessor.initializer))
# load value into each index of the array
for i in range(sizes[1]):
self.addline(loadToReg(itervar, loadval))
itervar.offset -= var.t.csize()
autolen = 0
if autoArrsize:
var.stackarrsize = autolen
self.stackCounter += autolen + 8
self.checkSemi()
# build function call not in an expression
@DeprecationWarning
def buildBlankfnCall(self):
# \see self.buildFunctionCall()
instructions, fn = self.buildFunctionCall()
if(self.current_token.tok == ")"):
self.advance()
self.checkSemi()
self.addline(instructions)
# evaluate an ambiguous expression.
# \see ExpressionEvaluator
def buildAssignment(self):
# buildAssignment is now just a wrapper for general expression evaluation
# because assignment operators are now included in the normal expression
# evaluation.
# destination is false because the overall expression has no
# destination
insters, out = self.evaluateExpression(destination=False)
self.addline(insters)
self.advance()
rfree(out.accessor)
def buildLabel(self):
name = self.current_token.value
# get an asm label to correspond with this label
asmname = getLogicLabel(
f"USERDEF.{name}") if name not in self.userlabels else self.userlabels[name]
# add the asm label to the current compilation location for jumping
self.addline(f"{asmname}:")
# record data in userlabels
self.userlabels[name] = asmname
# close up
self.advance()
# label declarations do not require a ';' because they are a simple
# two token declaration, so the code after them can share a line or
# not.
self.checkTok(":")
# build statement starting with an ambiguous ID token
def buildIDStatement(self):
id = self.current_token.value
if (self.compiler.isType(id)
and self.tokens[self.ctidx + 1].tok != T_OPENP):
self.buildDeclaration() # declaration
elif (self.tokens[self.ctidx + 1].tok == ":"):
self.buildLabel()
else:
# assignment or blank call
self.buildAssignment()
# else:
# # throw(UnkownIdentifier(self.current_token))
# compile a single line
def compileLine(self):
if(self.current_token.tok == T_KEYWORD):
# keyword statement
self.buildKeywordStatement()
elif (self.current_token.tok == T_ID):
# ID initiated statement
self.buildIDStatement()
elif(self.current_token.tok in OPERATORS):
self.buildAssignment()
else:
pass # ambiguous statement
throw(UnexpectedToken(self.current_token))
self.advance()
# compile the body of some control structures
def compileBodyScope(self):
self.max_depth += 1
self.push_stackstate()
self.beginRecursiveCompile()
self.pop_stackstate()
def beginRecursiveCompile(self): # recursive main
opens = 1 # maintain track of open and close scopes ("{, }")
self.recursive_depth += 1
while opens > 0 and self.current_token.tok != | |
from inspect import getattr_static
from platform import node
import sqlite3
import asyncio
from sys import argv, exit, stdout, executable
from os import execl
from os.path import dirname, realpath, join
from PyQt6 import QtCore, QtWidgets
from PyQt6.QtCore import QThread, QTimer, pyqtSlot
from PyQt6.QtWidgets import QMainWindow, QHeaderView, QCheckBox, QLineEdit, QLabel
#from data_handler import NodeStructure as ns
from data_handler import byte_swap_method,str_to_list
import opc_platform_server as main_server
from main_gui import Ui_MainWindow as gui
import win32con
import win32api
from win32api import GetLogicalDriveStrings
from win32file import GetDriveType
from datetime import datetime
from dialog import MessageBox, ExportOeeDialog, ExportLogsDialog, Dialog
from csv import writer
from configparser import ConfigParser
from PyQt6.QtWidgets import QTableWidgetItem
from PyQt6.QtGui import QIntValidator
from opc_server_class import OpcServerClass as opc_server
class Ui_MainWindow(QMainWindow, gui, opc_server):
def __init__(self):
super(Ui_MainWindow, self).__init__()
config = ConfigParser()
config_file_name = 'config.ini'
file_path = dirname(realpath(argv[0]))
config_file = join(file_path, config_file_name)
config.read(config_file)
# -------client worker thread and signals initialisation-----
self.server_thread = QThread()
self.database_file = config.get('server', 'database_file')
self.server_worker = main_server.OpcServerThread(file_path)
self.server_worker.moveToThread(self.server_thread)
self.server_worker.input_relay_signal.connect(self.input_label_update)
self.server_worker.output_relay_signal.connect(self.output_label_update)
self.server_worker.initialize_ui_label.connect(self.initialize_ui)
self.server_worker.encoder_pos_signal.connect(self.encoder_label_update)
self.server_worker.seconds_signal.connect(self.internal_seconds)
self.server_worker.minutes_signal.connect(self.internal_minutes)
self.server_worker.hours_signal.connect(self.internal_hours)
self.server_worker.oee_time_signal.connect(self.oee_time_update)
self.server_worker.days_signal.connect(self.internal_days)
self.server_worker.months_signal.connect(self.internal_months)
self.server_worker.years_signal.connect(self.internal_years)
self.server_worker.label_update_signal.connect(self.label_updater)
self.server_worker.uph_update_signal.connect(self.uph_update_plot)
self.server_worker.device_status_signal.connect(self.device_status_update)
self.server_worker.module_status_signal.connect(self.module_status_update)
self.server_worker.id_track_signal.connect(self.id_track_update)
self.server_worker.alarm_signal.connect(self.alarm_status_update)
self.server_worker.machine_status_signal.connect(self.machine_status_update)
self.server_worker.reset_lot_oee_signal.connect(self.reset_lot_oee)
self.server_thread.started.connect(self.server_worker.run)
#-----alarm blinker
self.blinkingtimer = QTimer()
self.blinkingtimer.timeout.connect(self.alarm_blinking)
self.default_font_colour = "color: rgb(255, 255, 255);"
self.default_bg_colour = "background-color:rgb(51, 53, 74)"
self.alarm_state = False
self.red_font_color = "color: rgb(255, 14, 14);"
self.white_font_color = "color: rgb(255, 255, 255);"
self.white_bg = "background-color: rgb(255, 255, 255);"
self.red_bg = "background-color: rgb(255, 14, 14);"
#-----id track
self.selected_track_number = 1
#-----alarm_dictionary
self.alarm_table = self.server_ns.alarm_table
#-----motor
self.selected_motor = 1
self.motor_save_node = 12006
#-----uph
self.x = [f"{z:02d}:{r*30:02d}" for z in range(0,24) for r in range(0,2)] #create 24H time range
self.x.append(self.x.pop(self.x.index('00:00')))
self.uph_dict = ()
self.y = [0]
self.plot_bar = None
self.plot_text = []
#-----user credentials
self.level_1 = self.server_ns.get_node_list_by_name('operator_credentials')[0]
self.level_2 = self.server_ns.get_node_list_by_name('engineer_credentials')[0]
self.level_3 = self.server_ns.get_node_list_by_name('oem_credentials')[0]
self.default_access_level = [1,0,1,1,0,1,0,0,0,0,0]
self.user_level = None
# ------control IO ON/OFF Color------------------------------
rgb_value_input_on = "64, 255, 0"
rgb_value_input_off = "0, 80, 0"
self.rgb_input_tuple = (rgb_value_input_off, rgb_value_input_on)
rgb_value_output_on = "255, 20, 20"
rgb_value_output_off = "80, 0, 0"
self.rgb_output_tuple = (rgb_value_output_off, rgb_value_output_on)
self.setupUi(self)
def initialize_ui(self):
self.load_config_method(self.server_ns.light_tower_list)
self.load_shift_time()
self.load_config_method(self.server_ns.api_config)
self.load_config_method(self.server_ns.laser_1_properties)
self.load_config_method(self.server_ns.laser_2_properties)
self.load_config_method(self.server_ns.user_access_settings)
self.load_config_method(self.server_ns.server_variable_list)
self.load_config_method(self.server_ns.time_variable_list)
self.load_config_method(self.server_ns.motor_1_properties)
self.load_config_method(self.server_ns.motor_2_properties)
self.load_config_method(self.server_ns.motor_3_properties)
self.load_config_method(self.server_ns.motor_4_properties)
self.load_config_method(self.server_ns.motor_6_properties)
self.uph_update_plot()
self.logger_handler("INFO", "Successfully load stored settings")
def load_shift_time(self):
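# For every shift-start node, read the stored "HH:MM" string from the OPC
# server and push it into the matching time-edit widget resolved via the
# node's label.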
[(getattr(self, self.server_ns.read_label_node_structure(node_id)[0]).setTime(datetime.strptime(self.async_run_read_from_opc(node_id), '%H:%M').time())) for node_id in self.server_ns.shift_start_time_node]
def change_password(self):
current_idx = self.username_combo_box.currentIndex()
old_pass = self.old_password_input.text()
new_pass = self.new_password_input.text()
re_new_pass = self.retyped_new_password_input.text()
selected_user_node = getattr(self,f"level_{current_idx+1}")
stored_password = self.async_run_read_from_opc(selected_user_node)
if new_pass != re_new_pass:
self.message_box_show("Password Mismatch. Please Try Again")
elif new_pass == re_new_pass:
if old_pass != stored_password:
self.message_box_show("Password Mismatch. Please Try Again")
elif old_pass == stored_password:
self.async_run_write_to_opc(selected_user_node, new_pass)
self.message_box_show("New Password Saved")
user_name = self.server_ns.read_name_node_structure(selected_user_node)
user_name_split = user_name.split('_')
print(user_name, user_name_split)
self.logger_handler("INFO", f"Password change for user:{user_name_split[0]}")
@pyqtSlot(list)
def save_config_method(self, node_list):
for node in node_list:
label_list = self.server_ns.read_label_node_structure(node)
ui_object = getattr(self, label_list[0])
test_type = type(ui_object)
if test_type is QCheckBox:
current_value = [str(int(getattr(self, check_box).isChecked())) for check_box in label_list]
current_value_conv = ",".join(current_value)
elif test_type is QLineEdit:
current_value = [(getattr(self, line_edit).text()) for line_edit in label_list]
current_value_conv = current_value[0]
self.async_run_write_to_opc(node, current_value_conv)
@pyqtSlot(list)
def load_config_method(self, node_list):
for node in node_list:
label_list = self.server_ns.read_label_node_structure(node)
if label_list[0] == 'None':
continue
ui_object = getattr(self, label_list[0])
test_type = type(ui_object)
stored_value = self.async_run_read_from_opc(node)
if test_type is QCheckBox:
split_value = str_to_list(stored_value)
current_value = [getattr(self, check_box).setChecked(bool(state)) for check_box, state in zip(label_list, split_value)]
elif test_type is QLineEdit:
self.label_updater(label_list, stored_value)
elif test_type is QLabel:
self.label_updater(label_list, stored_value)
@pyqtSlot(int)
def motor_selection(self, motor_number):
self.selected_motor = motor_number
self.motor_page_stacked_widget.setCurrentIndex(motor_number-1)
self.main_motor_control_stacked_widget.setCurrentIndex(motor_number-1)
for node in self.server_ns.encoder_list:
node_name = self.server_ns.read_name_node_structure(node)
if str(self.selected_motor) in node_name:
self.encoder_label_update(node)
def save_motor_properties(self):
self.async_run_write_to_opc(self.motor_save_node, True)
self.message_box_show("New Settings Saved!")
self.async_run_write_to_opc(self.motor_save_node, False)
@pyqtSlot(int, int)
def device_status_update(self, node, data_val):
label_str = self.server_ns.read_label_node_structure(node)
if data_val == True:
getattr(self, label_str[0]).setStyleSheet(
"font: 15pt 'Webdings';color:rgb(85, 255, 0);")
else:
getattr(self, label_str[0]).setStyleSheet(
"font: 15pt 'Webdings';color:rgb(255, 0, 0);")
@pyqtSlot(int, str)
def oee_time_update(self, node_id:int, duration_str:str):
label_str = self.server_ns.read_label_node_structure(node_id)
self.label_updater(label_str, duration_str)
@pyqtSlot(int, int)
def module_status_update(self, node, data_value):
label_list = self.server_ns.read_label_node_structure(node)
label_object = getattr(self, label_list[0])
module_number = self.check_module_number(label_list[0])
module_label_object = getattr(self, f"module_{module_number}_label")
module_check_box_object = getattr(self, f"module_{module_number}_check_box")
module_check_box_object.setChecked(False)
module_label_object.setStyleSheet(f"background-color: rgb({self.rgb_input_tuple[1]});color: rgb(0, 0, 0);")
if data_value == 0:
label_object.setText("IDLE")
#self.logger_handler(
# 'INFO', f"Module {i+1} has Stopped")
self.logger_handler('INFO', f"Module {module_number} is Idling")
elif data_value == 1000:
label_object.setText("DISABLED")
module_check_box_object.setChecked(True)
module_label_object.setStyleSheet(f"background-color: rgb({self.rgb_output_tuple[1]});color: rgb(0, 0, 0);")
self.logger_handler('INFO', f"Module {module_number} is Disabled")
elif data_value == 2000:
label_object.setText("INITIALIZING")
self.logger_handler('INFO', f"Module {module_number} is Initializing")
elif data_value == 3000:
label_object.setText("INIT DONE")
self.logger_handler('INFO', f"Module {module_number} has Initialized")
elif data_value == 4000:
label_object.setText("RUNNING")
self.logger_handler('INFO', f"Module {module_number} is Operational")
elif data_value == 5000:
label_object.setText("ALARM")
self.logger_handler('INFO', f"Alarm at Module {module_number}")
def check_module_number(self, label_str):
if 'module_1' in label_str:
return 1
if 'module_2' in label_str:
return 2
if 'module_3' in label_str:
return 3
if 'module_4' in label_str:
return 4
if 'module_5' in label_str:
return 5
if 'module_6' in label_str:
return 6
def lot_entry_save(self):
operator_id_node = self.server_ns.lot_input_nodes[0]
recipe_id_node = self.server_ns.lot_input_nodes[1]
operator_id_input_object = self.server_ns.read_label_node_structure(operator_id_node)
recipe_input_object = self.server_ns.read_label_node_structure(recipe_id_node)
current_operator = getattr(self,operator_id_input_object[1]).text()
current_recipe = getattr(self,recipe_input_object[1]).text()
stored_recipe = self.async_run_read_from_opc(recipe_id_node)
if current_recipe or current_operator:
if current_recipe != stored_recipe:
self.reset_lot_oee()
self.op_recp_set_text(operator_id_input_object, recipe_input_object, current_operator, current_recipe)
else:
self.op_recp_set_text(operator_id_input_object, recipe_input_object, None, None)
def op_recp_set_text(self, operator_id_input_object, recipe_input_object, current_operator, current_recipe):
if current_operator != None or current_recipe != None:
getattr(self,operator_id_input_object[0]).setText(current_operator)
getattr(self,recipe_input_object[0]).setText(current_recipe)
else:
getattr(self,operator_id_input_object[0]).setText('NA')
getattr(self,recipe_input_object[0]).setText('NA')
self.async_run_write_to_opc(self.server_ns.lot_input_nodes[0], current_operator)
self.async_run_write_to_opc(self.server_ns.lot_input_nodes[1], current_recipe)
def logger_handler(self, log_type: str, log_msg: str):
"""[summary]
Args:
log_type (str): choose either 'INFO' or 'ALARM'
msg (str): message to show at text box
"""
current_time = datetime.now()
time = (current_time.strftime("%d-%m-%Y | %H:%M:%S.%f")).split('.')[0]
if log_type == 'ALARM':
msg = f"{time} | {log_type} | #{log_msg}"
self.alarm_log_text_edit.appendPlainText(msg)
elif log_type == 'INFO':
msg = f"{time} | {log_type} | {log_msg}"
self.event_log_text_edit.appendPlainText(msg)
def uph_update_plot(self):
current_value = [self.async_run_read_from_opc(node) for node in self.server_ns.uph_plot_node]
for rect, h in zip(self.plot_bar, current_value):
rect.set_height(h)
if len(self.plot_text) != 0:
for text in self.plot_text:
text.set_visible(False)
self.plot_text.clear()
for i, v in enumerate(current_value):
self.plot_text.append(self.MplWidget.canvas.ax.text(
i - 0.3, v + 80, str(v), color='red', fontsize=10, rotation=90))
self.MplWidget.canvas.ax.relim()
self.MplWidget.canvas.ax.autoscale_view()
self.MplWidget.canvas.draw()
def async_run_wp_serial_pn_gen(self, namespace_index, track_number):
self.selected_track_number = track_number
unit_present, runner_count,wp_part_number,wp_dimension,bcr_1_status,bcr_2_status,wp_validation_status,wp_serial = asyncio.run(self.client_wp_serial_pn_gen(namespace_index, track_number))
self.id_track_count.setText(runner_count)
self.id_track_part_num.setText(wp_part_number)
self.id_track_rc.setText(str(wp_dimension))
self.id_track_status_conv("id_track_bcr1", bcr_1_status)
self.id_track_status_conv("id_track_bcr2", bcr_2_status)
self.id_track_status_conv("id_track_wp_validation", wp_validation_status)
if unit_present == 0:
wp_serial = "N/A"
self.id_track_serial.setText(wp_serial)
def id_track_status_conv(self, label_str, bcr_2_status):
if bcr_2_status == 1:
self.label_updater(label_str,"PASS")
elif bcr_2_status == 2:
self.label_updater(label_str,"FAIL")
else:
self.label_updater(label_str,"N/A")
@pyqtSlot(int, int)
def id_track_update(self, node_id, data_value):
label_str = self.server_ns.read_label_node_structure(node_id)[0]
track_number = label_str.split('_')[1]
button_name = f"track_{track_number}_button"
label_name = f"track_{track_number}_label"
button_object = getattr(self,button_name)
label_object = getattr(self,label_name)
runner_count = self.async_run_read_from_opc(node_id+1)
runner_count_str = byte_swap_method(runner_count)
button_object.setText(runner_count_str)
label_object.setText(runner_count_str)
green_light_on = f"background-color: rgb({self.rgb_input_tuple[1]});color: rgb(0, 0, 0);"
green_light_off = f"background-color: rgb({self.rgb_input_tuple[0]});color: rgb(0, 0, 0);"
if data_value == 1:
button_object.setStyleSheet(green_light_on)
label_object.setStyleSheet(green_light_on)
elif data_value == 0:
button_object.setStyleSheet(green_light_off)
label_object.setStyleSheet(green_light_off)
def machine_status_update(self):#, node_id, data_value):
machine_state = [int(self.async_run_read_from_opc(node_id)) for node_id in self.server_ns.machine_status_node]
try:
machine_idx = machine_state.index(1)
except ValueError:
machine_idx = None
if machine_idx != None:
machine_status_node = self.server_ns.machine_status_node[machine_idx]
machine_status_name = self.server_ns.read_name_node_structure(machine_status_node)
self.machine_status_label.setText(machine_status_name)
if machine_status_name == "RUNNING" and self.lot_start_datetime_label.text() != '0':
start_date_time = f"{self.date_days_label.text()}/{self.date_month_label.text()}/{self.date_year_label.text()} {self.time_hours_label.text()}:{self.time_minutes_label.text()}"
self.lot_start_datetime_label.setText(start_date_time)
else:
self.machine_status_label.setText("IDLE")
@pyqtSlot(int, int)
def alarm_status_update(self, node_id, data_value):
if node_id == self.server_ns.alarm_nodes[0]:
alarm_label = self.server_ns.read_label_node_structure(node_id)
if data_value > 0:
self.blinkingtimer.start(500)
try:
message = self.server_ns.alarm_table[data_value]
except KeyError:
message = "NO Description"
alarm_message = f"{data_value}-{message}"
self.label_updater(alarm_label,alarm_message)
self.logger_handler('ALARM', f"{data_value}-{alarm_message}")
elif data_value == 0:
self.blinkingtimer.stop()
self.alarm_bliking_colour_scheme(None)
self.alarm_state = False
self.label_updater(alarm_label,"")
elif node_id != self.server_ns.alarm_nodes[0] and data_value > 0:
other_alarm_message = self.server_ns.alarm_table[data_value]
self.logger_handler('ALARM', f"{data_value}-{other_alarm_message}")
def alarm_blinking(self):
self.alarm_state = not self.alarm_state
self.alarm_bliking_colour_scheme(self.alarm_state)
def alarm_bliking_colour_scheme(self, state):
if state == None:
# frame_5
self.machine_status_title_label.setStyleSheet(self.default_font_colour)
self.machine_status_label.setStyleSheet(self.default_font_colour)
self.frame_5.setStyleSheet(self.default_bg_colour)
# frame_9
self.frame_9.setStyleSheet(self.white_bg)
self.alarm_label_title.setStyleSheet("")
self.alarm_label.setStyleSheet("")
elif state == True:
# frame_5
self.machine_status_title_label.setStyleSheet(self.red_font_color) # red
self.machine_status_label.setStyleSheet(self.red_font_color) # red
self.frame_5.setStyleSheet(self.white_bg) # white
# frame_9
self.frame_9.setStyleSheet(self.white_bg)
self.alarm_label_title.setStyleSheet(self.red_font_color)
self.alarm_label.setStyleSheet(self.red_font_color)
elif state == False:
# frame_5
self.machine_status_title_label.setStyleSheet(self.white_font_color) # white
self.machine_status_label.setStyleSheet(self.white_font_color) # white
self.frame_5.setStyleSheet(self.red_bg) # red
# frame_9
self.frame_9.setStyleSheet(self.red_bg)
self.alarm_label_title.setStyleSheet(self.white_font_color)
self.alarm_label.setStyleSheet(self.white_font_color)
async def client_wp_serial_pn_gen(self, namespace_index, track_number):
return await self.wp_serial_pn_gen(namespace_index, track_number)
def async_run_read_from_opc(self, node_id):
return asyncio.run(self.client_read_from_opc(node_id))
def async_run_write_to_opc(self, node_id, data_value):
asyncio.run(self.client_write_to_opc(node_id, data_value))
async def client_read_from_opc(self, node_id):
return await self.read_from_opc(node_id,2)
async def client_write_to_opc(self, node_id, data_value):
#data_type = self.server_ns.read_data_type_node_structure(node_id)
await self.write_to_opc(node_id,2,data_value)
@pyqtSlot(int)
def encoder_label_update(self, node):
label_str = self.server_ns.read_label_node_structure(node)
#selected_motor = f"encoder_motor_{self.selected_motor}"
label_name = self.server_ns.read_name_node_structure(node)
if str(self.selected_motor) in label_name:
data_value = self.async_run_read_from_opc(node)
self.label_updater(label_str, data_value)
@pyqtSlot(int, int)
def input_label_update(self, node_id, data_value):
label_list = self.server_ns.read_label_node_structure(node_id)
for label in label_list:
label_object = getattr(self, label)
label_object.setStyleSheet(
f"background-color: rgb({self.rgb_input_tuple[data_value]});color: rgb(0, 0, 0);")
if node_id == 11003:
self.emo_label.setVisible(not data_value)
@pyqtSlot(int, int)
def output_label_update(self, node_id, data_value):
label_list = self.server_ns.read_label_node_structure(node_id)
for label in label_list:
label_object = getattr(self, label)
label_object.setStyleSheet(
f"background-color: rgb({self.rgb_output_tuple[data_value]});color: rgb(0, 0, 0);")
@pyqtSlot(list, str)
def label_updater(self, label_list: list, label_str: str):
"""Update a label or UI element.
Args:
label_list (list): names of the labels to be updated. | |
# -*- coding: utf-8 -*-
"""
flask_oauth
~~~~~~~~~~~
Implements basic OAuth support for Flask.
:copyright: (c) 2010 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import httplib2
from functools import wraps
from urllib.parse import urljoin
from flask import request, session, json, redirect, Response
from werkzeug import url_decode, url_encode, url_quote, \
parse_options_header, Headers
import oauth2
_etree = None
def get_etree():
"""Return an elementtree implementation. Prefers lxml"""
global _etree
if _etree is None:
try:
from lxml import etree as _etree
except ImportError:
try:
from xml.etree import cElementTree as _etree
except ImportError:
try:
from xml.etree import ElementTree as _etree
except ImportError:
raise TypeError('lxml or etree not found')
return _etree
def parse_response(resp, content, strict=False):
ct, options = parse_options_header(resp['content-type'])
if ct in ('application/json', 'text/javascript'):
return json.loads(content)
elif ct in ('application/xml', 'text/xml'):
# technically, text/xml is ascii based, but many implementations
# get that wrong, and since utf-8 is a superset of ascii there is
# not much harm in assuming utf-8 here
charset = options.get('charset', 'utf-8')
return get_etree().fromstring(content.decode(charset))
elif ct != 'application/x-www-form-urlencoded':
if strict:
return content
charset = options.get('charset', 'utf-8')
return url_decode(content, charset=charset).to_dict()
def add_query(url, args):
if not args:
return url
return url + ('?' in url and '&' or '?') + url_encode(args)
def encode_request_data(data, format):
if format is None:
return data, None
elif format == 'json':
return json.dumps(data or {}), 'application/json'
elif format == 'urlencoded':
return url_encode(data or {}), 'application/x-www-form-urlencoded'
raise TypeError('Unknown format %r' % format)
class OAuthResponse(object):
"""Contains the response sent back from an OAuth protected remote
application.
"""
def __init__(self, resp, content):
#: a :class:`~werkzeug.Headers` object with the response headers
#: the application sent.
self.headers = Headers(resp)
#: the raw, unencoded content from the server
self.raw_data = content
#: the parsed content from the server
self.data = parse_response(resp, content, strict=True)
@property
def status(self):
"""The status code of the response."""
return self.headers.get('status', type=int)
class OAuthClient(oauth2.Client):
def request_new_token(self, uri, callback=None, params={}):
if callback is not None:
params['oauth_callback'] = callback
req = oauth2.Request.from_consumer_and_token(
self.consumer, token=self.token,
http_method='POST', http_url=uri, parameters=params,
is_form_encoded=True)
req.sign_request(self.method, self.consumer, self.token)
body = req.to_postdata()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': str(len(body))
}
return httplib2.Http.request(self, uri, method='POST',
body=body, headers=headers)
class OAuthException(RuntimeError):
"""Raised if authorization fails for some reason."""
message = None
type = None
def __init__(self, message, type=None, data=None):
#: A helpful error message for debugging
self.message = message
#: A unique type for this exception if available.
self.type = type
#: If available, the parsed data from the remote API that can be
#: used to pinpoint the error.
self.data = data
def __str__(self):
return self.message
def __unicode__(self):
return self.message
class OAuth(object):
"""Registry for remote applications. In the future this will also
be the central class for OAuth provider functionality.
"""
def __init__(self):
self.remote_apps = {}
def remote_app(self, name, register=True, **kwargs):
"""Registers a new remote applicaton. If `param` register is
set to `False` the application is not registered in the
:attr:`remote_apps` dictionary. The keyword arguments are
forwarded to the :class:`OAuthRemoteApp` constructor.
"""
app = OAuthRemoteApp(self, name, **kwargs)
if register:
assert name not in self.remote_apps, \
'application already registered'
self.remote_apps[name] = app
return app
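# A minimal registration sketch (the name, URLs and credentials below are
# purely illustrative, not part of this module):
#
#   oauth = OAuth()
#   twitter = oauth.remote_app('twitter',
#       base_url='https://api.twitter.com/1/',
#       request_token_url='https://api.twitter.com/oauth/request_token',
#       access_token_url='https://api.twitter.com/oauth/access_token',
#       authorize_url='https://api.twitter.com/oauth/authenticate',
#       consumer_key='<consumer key>',
#       consumer_secret='<consumer secret>')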
class OAuthRemoteApp(object):
"""Represents a remote application.
:param oauth: the associated :class:`OAuth` object.
:param name: the name of the remote application
:param request_token_url: the URL for requesting new tokens
:param access_token_url: the URL for token exchange
:param authorize_url: the URL for authorization
:param consumer_key: the application specific consumer key
:param consumer_secret: the application specific consumer secret
:param request_token_params: an optional dictionary of parameters
to forward to the request token URL
or authorize URL depending on oauth
version.
:param access_token_params: an optional dictionary of parameters to forward to
the access token URL
:param access_token_method: the HTTP method that should be used
for the access_token_url. Defaults
to ``'GET'``.
"""
def __init__(self, oauth, name, base_url,
request_token_url,
access_token_url, authorize_url,
consumer_key, consumer_secret,
request_token_params=None,
access_token_params=None,
access_token_method='GET'):
self.oauth = oauth
#: the `base_url` all URLs are joined with.
self.base_url = base_url
self.name = name
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorize_url = authorize_url
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.tokengetter_func = None
self.request_token_params = request_token_params or {}
self.access_token_params = access_token_params or {}
self.access_token_method = access_token_method
self._consumer = oauth2.Consumer(self.consumer_key,
self.consumer_secret)
self._client = OAuthClient(self._consumer)
def status_okay(self, resp):
"""Given request data, checks if the status is okay."""
try:
return int(resp['status']) in (200, 201)
except ValueError:
return False
def get(self, *args, **kwargs):
"""Sends a ``GET`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'GET'
return self.request(*args, **kwargs)
def post(self, *args, **kwargs):
"""Sends a ``POST`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'POST'
return self.request(*args, **kwargs)
def put(self, *args, **kwargs):
"""Sends a ``PUT`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'PUT'
return self.request(*args, **kwargs)
def delete(self, *args, **kwargs):
"""Sends a ``DELETE`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'DELETE'
return self.request(*args, **kwargs)
def make_client(self, token=None):
"""Creates a new `oauth2` Client object with the token attached.
Usually you don't have to do that but use the :meth:`request`
method instead.
"""
return oauth2.Client(self._consumer, self.get_request_token(token))
def request(self, url, data="", headers=None, format='urlencoded',
method='GET', content_type=None, token=None):
"""Sends a request to the remote server with OAuth tokens attached.
The `url` is joined with :attr:`base_url` if the URL is relative.
.. versionadded:: 0.12
added the `token` parameter.
:param url: where to send the request to
:param data: the data to be sent to the server. If the request method
is ``GET`` the data is appended to the URL as query
parameters, otherwise encoded to `format` if the format
is given. If a `content_type` is provided instead, the
data must be a string encoded for the given content
type and used as request body.
:param headers: an optional dictionary of headers.
:param format: the format for the `data`. Can be `urlencoded` for
URL encoded data or `json` for JSON.
:param method: the HTTP request method to use.
:param content_type: an optional content type. If a content type is
provided, the data is passed as it and the
`format` parameter is ignored.
:param token: an optional token to pass to tokengetter. Use this if you
want to support sending requests using multiple tokens.
If you set this to anything not None, `tokengetter_func`
will receive the given token as an argument, in which case
the tokengetter should return the `(token, secret)` tuple
for the given token.
:return: an :class:`OAuthResponse` object.
"""
headers = dict(headers or {})
client = self.make_client(token)
url = self.expand_url(url)
if method == 'GET':
assert format == 'urlencoded'
if data:
url = add_query(url, data)
data = ""
else:
if content_type is None:
data, content_type = encode_request_data(data, format)
if content_type is not None:
headers['Content-Type'] = content_type
return OAuthResponse(*client.request(url, method=method,
body=data or '',
headers=headers))
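# Example call, assuming `twitter` was registered as sketched above and a
# tokengetter function has been assigned to `tokengetter_func`:
#
#   resp = twitter.get('account/verify_credentials.json')
#   if resp.status == 200:
#       data = resp.data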
def expand_url(self, url):
return urljoin(self.base_url, url)
def generate_request_token(self, callback=None):
if callback is not None:
callback = urljoin(request.url, callback)
resp, content = self._client.request_new_token(
self.expand_url(self.request_token_url), callback,
self.request_token_params)
if not self.status_okay(resp):
raise OAuthException('Failed to generate request token',
type='token_generation_failed')
data = parse_response(resp, content)
if data is None:
raise OAuthException('Invalid token response from ' + self.name,
type='token_generation_failed')
tup = (data['oauth_token'], data['oauth_token_secret'])
session[self.name + '_oauthtok'] = tup
return tup
def get_request_token(self, token=None):
assert self.tokengetter_func is not None, 'missing tokengetter function'
# Don't pass the token if the token is None to support old
# tokengetter functions.
rv = self.tokengetter_func(*(token and (token,) or ()))
if rv is None:
rv = session.get(self.name + '_oauthtok')
if rv is None:
raise OAuthException('No token available', type='token_missing')
return oauth2.Token(*rv)
def free_request_token(self):
session.pop(self.name + '_oauthtok', None)
session.pop(self.name + '_oauthredir', None)
def authorize(self, callback=None):
"""Returns a redirect response to the remote authorization URL with
the signed callback given. The callback must be `None` in which
case the application will most likely switch to PIN based authentication
or use a remotely stored callback URL. Alternatively it's an URL
on the system that has to be decorated as :meth:`authorized_handler`.
"""
if self.request_token_url:
token = self.generate_request_token(callback)[0]
url = '%s?oauth_token=%s' % (self.expand_url(self.authorize_url),
url_quote(token))
else:
assert callback is not None, 'Callback is required for OAuth2'
# This is for things like facebook's | |
r"""The field module calculates the potentials and fields of silicon sensors.
The potential is determined numerically by solving these equations on a mesh:
.. math:: \nabla^2 \Phi = 0
:label: laplace
.. math:: \nabla^2 \Phi = \frac{\rho}{\epsilon}
:label: poisson
For the weighting potential equation :eq:`laplace` is solved with the
boundary conditions:
.. math::
\begin{eqnarray}
\Phi & = & 0 \\
\Phi_r & = & 1
\end{eqnarray}
The pixel readout electrode(s) are at a potential 1 and all other equipotential
pixel parts (backside, bias columns, etc.) at 0.
For the electric potential the equation :eq:`poisson` is solved with the
boundary conditions:
.. math::
\begin{eqnarray}
\Phi & = & V_{bias} \\
\Phi_r & = & V_{readout}
\end{eqnarray}
The pixel readout electrode(s) are at :math:`V_{readout}` potential and the bias parts
(backside, bias columns, etc.) are at :math:`V_{bias}`.
The field is then derived via:
.. math::
\vec{E} = -\nabla \Phi
.. NOTE::
For simple cases (e.g. planar sensor with 100% fill factor) also analytical
solutions are provided. The analytical results are also used to benchmark
the numerical results in the automated unit tests.
"""
import fipy
import numpy as np
import logging
from scipy.interpolate import interp1d, RectBivariateSpline, griddata, interp2d, SmoothBivariateSpline
from scipy import constants
from scarce import silicon
from scarce import geometry
from scarce import constant as C
from scarce import solver
_LOGGER = logging.getLogger(__name__)
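# A minimal usage sketch, assuming a suitable mesh was built beforehand (e.g.
# via the imported scarce.geometry helpers); the numbers are illustrative:
#
#   pot = calculate_planar_sensor_w_potential(mesh, width=50., pitch=30.,
#                                              n_pixel=9, thickness=200.)
#   desc = Description(pot, min_x=-225., max_x=225., min_y=0., max_y=200.,
#                      nx=200, ny=200)
#   e_x, e_y = desc.get_field(x=0., y=100.)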
class Description(object):
''' Class to describe potential and field at any
point in space. The numerical potential estimation
is used and interpolated. The field is derived from
a smoothed potential interpolation to minimize
numerical instabilities.
'''
def __init__(self, potential, min_x, max_x, min_y, max_y,
nx, ny, smoothing=0.1):
_LOGGER.debug('Create potential and field description')
self.pot_data = potential
try:
self.depletion_data = np.array(
[self.pot_data.depletion[0], self.pot_data.depletion[1]])
except AttributeError:
self.depletion_data = None
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
self._nx = nx
self._ny = ny
self.potential_grid_inter = self.interpolate_potential(self.pot_data)
self.smoothing = smoothing
# Do not calculate field on init, since it is time consuming
# and maybe not needed
self.pot_smooth = None
self.field_x = None
self.field_y = None
# Do not calculate depletion boundaries on init
# since it is time consuming and maybe not needed
self.depletion_region = None
self._x = np.linspace(self.min_x, self.max_x, nx)
self._y = np.linspace(self.min_y, self.max_y, ny)
# Create sparse x,y plot grid
self._xx, self._yy = np.meshgrid(self._x, self._y, sparse=True)
# Potential interpolated on a grid with NaN set to closest value
self.potential_grid = self.potential_grid_inter(self._xx, self._yy)
# self._extrapolate_boundary()
self.potential_grid = self._interpolate_nan(self.potential_grid)
def _extrapolate_boundary(self):
''' Extrapolate the potental at the boundary with constant gradient
to prevent oscillations when smoothing.
'''
# Extend array by 10% in y direction, y direction is
# in dimension 0 here
y_shape = int(self.potential_grid.shape[0] * 1.0)
# Difference to old shape
dy_shape = y_shape - self.potential_grid.shape[0]
# Create extended array and fill with old data
new_potential_grid = np.zeros(
shape=(y_shape, self.potential_grid.shape[1]))
new_potential_grid[dy_shape:, :] = self.potential_grid
# Calculate the gradient in y for linear extrapolation
dy = self.potential_grid[0, :] - self.potential_grid[1, :]
# Extrapolate at the extended area
slope = (np.tile(np.arange(1, dy_shape + 1)[::-1],
(self.potential_grid.shape[1], 1))).T * dy[np.newaxis, :]
new_potential_grid[:dy_shape] = self.potential_grid[0, :] + slope
# [:new_potential_grid.shape[0] - dy_shape, :]
self.potential_grid = new_potential_grid
# Also extend y positions
self._y = np.linspace(
self.min_y - dy_shape * np.diff(self._y)[0], self.max_y,
self._ny + dy_shape)
print('self._y', self._y)
# raise
def interpolate_potential(self, potential=None):
''' Interpolates the potential on a grid.
'''
_LOGGER.debug('Interpolate potential')
if potential is None:
potential = self.pot_data
points = np.array(potential.mesh.getFaceCenters()).T
values = np.array(np.array(potential.arithmeticFaceValue()))
def grid_interpolator(grid_x, grid_y):
return griddata(points=points,
values=values,
xi=(grid_x, grid_y),
method='linear',
rescale=False,
fill_value=np.nan) # values.max())
return grid_interpolator
def get_potential_minimum(self, axis=None):
''' Returns the minimum potential value
'''
return self.potential_grid.min(axis=axis)
def get_potential_minimum_pos_y(self):
''' Returns the position in the array with
the potential minimum
'''
return self._y[np.argmin(self.potential_grid, axis=0)]
def get_depletion(self, x):
''' Returns the depletion boundary at x.
For planar sensors only!
'''
if not self.depletion_region: # Calculate on demand to save time
_LOGGER.debug('Calculate depletion region description')
if self.depletion_data is not None:
self.depletion_region = interp1d(x=self.depletion_data[0],
y=self.depletion_data[1],
kind='cubic'
)
else:
raise RuntimeError(
'The data does not have depletion information.')
return self.depletion_region(x)
def get_depl_mask(self, x=None, y=None):
''' Returns true for all points outside of the depletion zone
'''
if x is None or y is None:
x = np.array(self.pot_data.mesh.x)
y = np.array(self.pot_data.mesh.y)
mask = np.zeros_like(x, dtype=bool)
mask[y > self.get_depletion(x)] = True
return mask
def get_potential(self, x, y):
return self.potential_grid_inter(x, y)
def get_potential_smooth(self, x, y):
if self.pot_smooth is None:
self._smooth_potential()
return self.pot_smooth(x, y, grid=False)
def get_field(self, x, y):
''' Returns the field in V/um at different positions.
Parameters
----------
x, y : array_like
Particle x, y positions
'''
if self.field_x is None or self.field_y is None:
self._derive_field()
return np.array([self.field_x(x, y, grid=False),
self.field_y(x, y, grid=False)])
def _smooth_potential(self, smoothing=None):
''' This function takes the potential grid interpolation
and smooths the data points.
Smoothing is really buggy in scipy, the only
working way is to smooth on a grid. Thus mesh points
of the potential solution cannot be used directly.
'''
_LOGGER.debug('Calculate smoothed potential description')
if not smoothing:
smoothing = self.smoothing
# Scale potential to make interpolation independent of bias
v_min = np.nanmin(self.potential_grid)
v_max = np.nanmax(self.potential_grid)
# Scale potential to be within 0 .. 1
potential_scaled = (self.potential_grid - v_min) / (v_max - v_min)
def interpolator(x, y, **kwarg):
func = RectBivariateSpline(self._x, self._y, potential_scaled.T,
s=smoothing, kx=3, ky=3)
# func = interp2d(self._x, self._y, potential_scaled, kind='cubic')
# return func(x, y) * (v_max - v_min) + v_min
return func(x, y, **kwarg) * (v_max - v_min) + v_min
# Smooth on the interpolated grid
self.pot_smooth = interpolator
def _derive_field(self):
''' Takes the potential to calculate the field in x, y
via E_x, E_y = - grad(Potential)
with spline interpolation and smoothing.
'''
_LOGGER.debug('Calculate field from potential')
if not self.pot_smooth:
self._smooth_potential()
E_x, E_y = np.gradient(-self.pot_smooth(self._x,
self._y,
grid=True),
np.diff(self._x)[0], np.diff(self._y)[0])
# Create spline interpolators for E_x,E_y
self.field_x = RectBivariateSpline(
self._x, self._y, E_x, s=0, kx=3, ky=3)
self.field_y = RectBivariateSpline(
self._x, self._y, E_y, s=0, kx=3, ky=3)
def _interpolate_nan(self, a):
''' Fills nans with closest non nan value.
Might not work well for multi-dimensional arrays. :TODO:
'''
mask = np.isnan(a)
a[mask] = np.interp(
np.flatnonzero(mask), np.flatnonzero(~mask), a[~mask])
return a
def calculate_planar_sensor_w_potential(mesh, width, pitch,
n_pixel, thickness):
''' Calculates the weighting field of a planar sensor.
'''
_LOGGER.info('Calculating weighting potential')
# Mesh validity check
mesh_width = mesh.getFaceCenters()[0, :].max(
) - mesh.getFaceCenters()[0, :].min()
if mesh_width != width * n_pixel:
raise ValueError(
'The provided mesh width does not correspond to the sensor width')
if mesh.getFaceCenters()[1, :].min() != 0:
raise ValueError('The provided mesh does not start at 0.')
if mesh.getFaceCenters()[1, :].max() != thickness:
raise ValueError('The provided mesh does not end at sensor thickness.')
potential = fipy.CellVariable(mesh=mesh, name='potential', value=0.)
permittivity = 1.
potential.equation = (fipy.DiffusionTerm(coeff=permittivity) == 0.)
# Calculate boundaries
backplane = mesh.getFacesTop()
readout_plane = mesh.getFacesBottom()
electrodes = readout_plane
bcs = [fipy.FixedValue(value=0., faces=backplane)]
X, _ = mesh.getFaceCenters()
for pixel in range(n_pixel):
pixel_position = width * (pixel + 1. / 2.) - width * n_pixel / 2.
bcs.append(fipy.FixedValue(value=1.0 if pixel_position == 0. else 0.,
faces=electrodes &
(X > pixel_position - pitch / 2.) &
(X < pixel_position + pitch / 2.)))
solver.solve(
potential, equation=potential.equation, boundaryConditions=bcs)
return potential
def calculate_planar_sensor_potential(mesh, width, pitch, n_pixel, thickness,
n_eff, V_bias, V_readout, V_bi=0):
''' Calculates the potential of a planar sensor.
Parameters
----------
mesh : fipy.Gmsh2D
Mesh where to solve the poisson equation
width : number
Width of one pixel in :math:`\mathrm{\mu m}`
pitch : number
The width of the readout electrode in :math:`\mathrm{\mu m}`
n_pixel : int
Number of pixels
thickness : number
Thickness of the sensor in :math:`\mathrm{\mu m}`
n_eff : number
Effective doping concentration in :math:`\mathrm{\frac{1}{cm^3}}`
V_bias : number
Bias voltage in Volt
V_readout : number
Readout voltage in Volt
V_bi : number
Build in voltage. Can be calculated by
scarce.silicon.get_diffusion_potential(...).
Notes
-----
So far the depletion zone in the case of an underdepleted sensor is only
calculated as a constant y boundary. This is wrong for pixels with low
fill factor.
'''
_LOGGER.info('Calculating potential')
# Mesh validity check
min_x = float(mesh.getFaceCenters()[0, :].min())
max_x = float(mesh.getFaceCenters()[0, :].max())
min_y = float(mesh.getFaceCenters()[1, :].min())
max_y = |