# Mock API-response fixtures (test data). This module defines only constants.
# Mock response for fetching a single alert. The "fireeye"/"helix" values in
# the data suggest this mirrors a FireEye Helix alert payload — all values are
# placeholder/test data, not real identifiers.
ALERT_RESP = {
    "primary_id": 3232,
    "alert_type": {
        "id": 1793,
        "created_at": "2019-05-25T19:40:09.132456Z",
        "updated_at": "2019-08-12T18:40:12.132456Z",
        "type_id": "8916-1b5d68c0519f",
        "category": "Host",
        "detail_fields": [
            "username"
        ],
        "is_default": False,
        "is_internal": True,
        "name": "HX",
        "summary_fields": [
            "malwaretype",
            "virus"
        ],
        "source": [
            "agenthostname",
            "agentip"
        ],
        "destination": [],
        "created_by": "id",
        "updated_by": "id"
    },
    "assigned_to": None,
    "context": None,
    "created_by": {
        "id": "id",
        "avatar": "avatar",
        "name": "System User",
        "username": "system_user",
        "primary_email": "no.reply@fireeye.com"
    },
    "events_count": 2,
    "notes_count": 0,
    "queues": [
        "Default Queue"
    ],
    "source_url": "https://url",
    "updated_by": {
        "id": "id",
        "avatar": "avatar",
        "name": "George",
        "username": "george@demisto.com",
        "primary_email": "george@demisto.com"
    },
    "organization": "demisto",
    "created_at": "2019-03-30T19:40:16.132456Z",
    "updated_at": "2019-10-20T12:35:02.132456Z",
    "id": 123,
    "alert_threat": "Unknown",
    "alert_type_details": {
        "source": "siem",
        "detail": {
            "username": "demon",
            "processpath": "c:\\windows\\microsoft.net\\framework\\v7.0.30319\\csc.exe",
            "confidence": "high",
            "sha1": "sha1",
            "agenthostname": "siem",
            "pid": 11,
            "objecttype": "file",
            "hostname": "helix.apps.fireeye.com",
            "bytes": 35,
            "meta_deviceid": "deviceID",
            "agentip": "192.168.0.1",
            "virus": "gen:variant.ursu",
            "result": "quarantined",
            "malwaretype": "malware",
            "createdtime": "2019-03-30T14:07:53.667Z",
            "lastmodifiedtime": "2019-03-31T14:07:53.778Z",
            "filename": "c:\\users\\demon\\appdata\\local\\temp",
            "accountdomain": "siem",
            "method": "oas",
            "lastaccessedtime": "2019-03-30T14:07:53.217Z",
            "md5": "md5"
        },
        "summary": {
            "virus": "gen:variant.ursu",
            "malwaretype": "malware"
        }
    },
    "assigned_at": None,
    "classification": 30,
    "closed_reason": "",
    "closed_state": "Unknown",
    "confidence": "High",
    "description": "FireEye HX detected and quarantined malware on this system.",
    "distinguisher_key": "quarantined",
    "distinguishers": {
        "virus": "gen:variant.ursu",
        "agentid": "4fkds",
        "result": "quarantined",
        "malwaretype": "malware"
    },
    "emailed_at": 7371,
    "events_threshold": 1,
    "external_id": "",
    # NOTE(review): the trailing "ZZ" below looks like a typo in the fixture,
    # but tests may assert on these exact bytes — confirm before changing.
    "first_event_at": "2019-03-30T14:07:34.132456ZZ",
    "last_event_at": "2019-03-31T14:08:07.132456ZZ",
    "external_ips": [],
    "external_ips_count": 0,
    "info_links": [],
    "internal_ips": [],
    "internal_ips_count": 0,
    "is_suppressed": False,
    "is_threat": False,
    "is_tuned": False,
    "kill_chain": [
        "5 - Installation"
    ],
    "last_sync_ms": 15535426,
    "message": "FIREEYE H",
    "metaclasses": {
        "ids,antivirus": 2
    },
    "mongo_id": "5c99",
    "origin_id": "map_rule",
    "products": {
        "hx": 2
    },
    "risk": "Medium",
    "risk_order": 2,
    "search": "class=fireeye_hx_alert eventlog=mal result=quarantined NOT srcipv4:$exclusions.global.srcipv4",
    "seconds_threshold": 60,
    "severity": "Medium",
    "source_revision": 0,
    "state": "Open",
    "tags": [
        "fireeye"
    ],
    "threat_changed_at": None,
    "threat_type": 50,
    "trigger_id": "2615",
    "trigger_revision": 0,
    "tuning_search": "",
    "type": "fireeye_rule"
}
# Mock paginated alert-list response: a "meta" paging envelope plus a
# "results" array of alert objects. The first result is identical to
# ALERT_RESP-style data; the second is a distinct IOC-rule alert.
ALERTS_RESP = {
    "meta": {
        "count": 115,
        "previous": None,
        "limit": 2,
        "offset": 0,
        "next": ""
    },
    "results": [
        {
            "primary_id": 3232,
            "alert_type": {
                "id": 1793,
                "created_at": "2019-05-25T19:40:09.132456Z",
                "updated_at": "2019-08-12T18:40:12.132456Z",
                "type_id": "8916-1b5d68c0519f",
                "category": "Host",
                "detail_fields": [
                    "username"
                ],
                "is_default": False,
                "is_internal": True,
                "name": "HX",
                "summary_fields": [
                    "malwaretype",
                    "virus"
                ],
                "source": [
                    "agenthostname",
                    "agentip"
                ],
                "destination": [],
                "created_by": "id",
                "updated_by": "id"
            },
            "assigned_to": None,
            "context": None,
            "created_by": {
                "id": "id",
                "avatar": "avatar",
                "name": "System User",
                "username": "system_user",
                "primary_email": "no.reply@fireeye.com"
            },
            "events_count": 2,
            "notes_count": 0,
            "queues": [
                "Default Queue"
            ],
            "source_url": "https://url",
            "updated_by": {
                "id": "id",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "george@demisto.com"
            },
            "organization": "demisto",
            "created_at": "2019-03-30T19:40:16.132456Z",
            "updated_at": "2019-10-20T12:35:02.132456Z",
            "id": 123,
            "alert_threat": "Unknown",
            "alert_type_details": {
                "source": "siem",
                "detail": {
                    "username": "demon",
                    "processpath": "c:\\windows\\microsoft.net\\framework\\v7.0.30319\\csc.exe",
                    "confidence": "high",
                    "sha1": "sha1",
                    "agenthostname": "siem",
                    "pid": 11,
                    "objecttype": "file",
                    "hostname": "helix.apps.fireeye.com",
                    "bytes": 35,
                    "meta_deviceid": "deviceID",
                    "agentip": "192.168.0.1",
                    "virus": "gen:variant.ursu",
                    "result": "quarantined",
                    "malwaretype": "malware",
                    "createdtime": "2019-03-30T14:07:53.667Z",
                    "lastmodifiedtime": "2019-03-31T14:07:53.778Z",
                    "filename": "c:\\users\\demon\\appdata\\local\\temp",
                    "accountdomain": "siem",
                    "method": "oas",
                    "lastaccessedtime": "2019-03-30T14:07:53.217Z",
                    "md5": "md5"
                },
                "summary": {
                    "virus": "gen:variant.ursu",
                    "malwaretype": "malware"
                }
            },
            "assigned_at": None,
            "classification": 30,
            "closed_reason": "",
            "closed_state": "Unknown",
            "confidence": "High",
            "description": "FireEye HX detected and quarantined malware on this system.",
            "distinguisher_key": "quarantined",
            "distinguishers": {
                "virus": "gen:variant.ursu",
                "agentid": "4fkds",
                "result": "quarantined",
                "malwaretype": "malware"
            },
            "emailed_at": 7371,
            "events_threshold": 1,
            "external_id": "",
            # NOTE(review): trailing "ZZ" below appears to be a fixture typo —
            # confirm before changing; tests may rely on the exact bytes.
            "first_event_at": "2019-03-30T14:07:34.132456ZZ",
            "last_event_at": "2019-03-31T14:08:07.132456ZZ",
            "external_ips": [],
            "external_ips_count": 0,
            "info_links": [],
            "internal_ips": [],
            "internal_ips_count": 0,
            "is_suppressed": False,
            "is_threat": False,
            "is_tuned": False,
            "kill_chain": [
                "5 - Installation"
            ],
            "last_sync_ms": 15535426,
            "message": "FIREEYE H",
            "metaclasses": {
                "ids,antivirus": 2
            },
            "mongo_id": "5c99",
            "origin_id": "map_rule",
            "products": {
                "hx": 2
            },
            "risk": "Medium",
            "risk_order": 2,
            "search": "class=fireeye_hx_alert eventlog=mal result=quarantined NOT srcipv4:$exclusions.global.srcipv4",
            "seconds_threshold": 60,
            "severity": "Medium",
            "source_revision": 0,
            "state": "Open",
            "tags": [
                "fireeye"
            ],
            "threat_changed_at": None,
            "threat_type": 50,
            "trigger_id": "2615",
            "trigger_revision": 0,
            "tuning_search": "",
            "type": "fireeye_rule"
        },
        {
            "primary_id": 23,
            "alert_type": {
                "id": 18,
                "created_at": "2019-03-25T10:40:09.132456Z",
                "updated_at": "2019-09-10T18:40:13.132456Z",
                "type_id": "03e1099a-38d8",
                "category": "Host",
                "detail_fields": [
                    "eventtime"
                ],
                "is_default": False,
                "is_internal": True,
                "name": "HX",
                "summary_fields": [
                    "result",
                    "iocnames"
                ],
                "source": [
                    "agenthostname",
                    "agentip"
                ],
                "destination": [],
                "created_by": "ab",
                "updated_by": "ab"
            },
            "assigned_to": None,
            "context": None,
            "created_by": {
                "id": "ab",
                "avatar": "avatar",
                "name": "System User",
                "username": "system_user",
                "primary_email": "no.reply@fireeye.com"
            },
            "events_count": 2,
            "notes_count": 0,
            "queues": [
                "Default Queue"
            ],
            "source_url": "https://source_url.com",
            "updated_by": {
                "id": "e7",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "george@demisto.com"
            },
            "organization": "",
            "created_at": "2019-03-30T19:40:17.132456Z",
            "updated_at": "2019-10-23T20:35:02.132456Z",
            "id": 32,
            "alert_threat": "Unknown",
            "alert_type_details": {
                "source": "siem",
                "detail": {
                    "username": "system",
                    "processpath": "c:\\windows\\system32\\cmd.exe",
                    "eventtime": "2019-03-30T14:11:31.000Z",
                    "hostname": "helix.apps.fireeye.com",
                    "iocnames": "cobalt strike",
                    "process": "cmd.exe",
                    "args": "cmd.exe /c echo zhfrlb",
                    "pid": 99,
                    "agentip": "192.168.0.1",
                    "meta_deviceid": "86",
                    "result": "alert",
                    "starttime": "2019-03-30T14:11:20.002Z",
                    "pprocess": "services.exe",
                    "ppid": 66,
                    "agenthostname": "siem",
                    "md5": "md5"
                },
                "summary": {
                    "result": "alert",
                    "iocnames": "cobalt strike"
                }
            },
            "assigned_at": None,
            "classification": 2,
            "closed_reason": "",
            "closed_state": "Unknown",
            "confidence": "High",
            "description": "This rule alerts on IOC.",
            "distinguisher_key": "cobalt strike",
            "distinguishers": {
                "agentid": "fw",
                "iocnames": "cobalt strike"
            },
            "emailed_at": 737100,
            "events_threshold": 1,
            "external_id": "",
            "first_event_at": "2019-03-25T14:09:45.132456Z",
            "last_event_at": "2019-03-25T14:11:31.132456Z",
            "external_ips": [],
            "external_ips_count": 0,
            "info_links": [],
            "internal_ips": [],
            "internal_ips_count": 0,
            "is_suppressed": False,
            "is_threat": False,
            "is_tuned": False,
            "kill_chain": [
                "5 - Installation"
            ],
            "last_sync_ms": 1553542006849,
            "message": "FIREEYE HX [IOC Process Event]",
            "metaclasses": {
                "ids": 2
            },
            "mongo_id": "5c",
            "origin_id": "map_rule",
            "products": {
                "hx": 2
            },
            "risk": "Medium",
            "risk_order": 2,
            "search": "class=fireeye_hx_alert eventlog=ioc eventtype=processevent NOT srcipv4:$exclusions.global.srcipv4",
            "seconds_threshold": 60,
            "severity": "Medium",
            "source_revision": 0,
            "state": "Open",
            "tags": [
                "fireeye",
                "helixhxrule"
            ],
            "threat_changed_at": None,
            "threat_type": 50,
            "trigger_id": "42399",
            "trigger_revision": 0,
            "tuning_search": "",
            "type": "fireeye_rule"
        }
    ]
}
# Mock response for listing the cases associated with an alert:
# "meta" paging envelope plus a single case object in "results".
CASES_BY_ALERT_RESP = {
    "meta": {
        "count": 1,
        "previous": None,
        "limit": 30,
        "offset": 0,
        "next": None
    },
    "results": [
        {
            "assigned_to": None,
            "created_at": "created_at",
            "created_by": {
                "id": "id",
                "avatar": "avatar",
                "name": "name",
                "username": "username",
                "primary_email": "primary_email"
            },
            "description": "",
            "events_count": 10,
            "id": 35,
            "info_links": [],
            "name": "demisto test case",
            "notes_count": 0,
            "priority": "Critical",
            "priority_order": 4,
            "severity": 10,
            "state": "Testing",
            "status": "Declared",
            "tags": [],
            "total_days_unresolved": "16 23:52:09.819390",
            "updated_at": "updated_at",
            "updated_by": {
                "id": "id",
                "avatar": "avatar",
                "name": "name",
                "username": "username",
                "primary_email": "primary_email"
            }
        }
    ]
}
# Mock response for listing the endpoints associated with an alert. Unlike the
# other list fixtures, "results" here is a dict (status + "endpoints" array),
# not a bare list — callers must handle that shape.
ENDPOINTS_BY_ALERT_RESP = {
    "meta": {
        "count": 1,
        "previous": None,
        "limit": 30,
        "offset": 0,
        "next": None
    },
    "results": {
        "status": "completed",
        "endpoints": [
            {
                "id": 191,
                "customer_id": "demisto",
                "agent_id": "agent_id",
                "containment_queued": False,
                "containment_state": "normal",
                "created_at": "created_at",
                "device_id": "device_id",
                "domain": "WORKGROUP",
                "hostname": "Demisto",
                "mac_address": "mac_address",
                "operating_system": "Windows 10 Pro",
                "primary_ip_address": "primary_ip_address",
                "updated_at": "updated_at",
                "timezone": "timezone",
                "hash": "hash",
                "source_url": "source_url"
            }
        ]
    }
}
# Mock response for listing the events behind an alert. Contains one deeply
# nested event record including HX agent details ("agentdetails") with network
# adapters, malware-engine versions, and raw "event_values". Note the mixed
# timestamp casing (e.g. "2019-08-11t06:51:40.000z" vs "...T...Z") — this is
# fixture data reproduced as-is, not normalized.
EVENTS_BY_ALERT_RESP = {
    "meta": {
        "count": 10,
        "previous": None,
        "limit": 1,
        "offset": 0,
        "next": ""
    },
    "results": [
        {
            "username": "admin",
            "_eventid": "",
            "process": "net1",
            "agenturi": "/hx/api/v3/hosts/f9zsksax",
            "pid": 404,
            "matched_at": "2019-08-11t06:51:40.000z",
            "pprocesspath": "c:\\windows\\system32\\net1",
            "result": "alert",
            "meta_ts": "2019-09-11T06:51:40.000Z",
            "processpath": "c:\\windows\\system32\\net1.exe",
            "_errors": [],
            "meta_agenturi": "/hx/api/v3/hosts/f9zsksax",
            "meta_rule": "fireeye_hx_alert",
            "indicator": {
                "category": "custom",
                "display_name": "tactic",
                "url": "/hx/api/v3/indicators/custom/f9zsksax",
                "signature": None,
                "_id": "f9zsksax",
                "uri_name": "f9zsksax"
            },
            "uuid": "f9zsksax",
            "eventlog": "ioc",
            "reported_at": "2019-09-13t06:53:08.000",
            "eventtype": "processevent",
            "msr_ruleids": [],
            "agentstatus": "normal",
            "condition": {
                "indicators": [
                    {
                        "category": "custom",
                        "name": "tactic",
                        "signature": None
                    }
                ]
            },
            "hx_alert_id": 859,
            "detect_rulematches": [
                {
                    "confidence": "high",
                    "severity": "medium",
                    "ruleid": "99",
                    "tags": [
                        "fireeye",
                        "helixhxrule",
                        "ioc"
                    ],
                    "rulename": "fireeye hx",
                    "revision": 0
                },
                {
                    "confidence": "medium",
                    "severity": "medium",
                    "ruleid": "1",
                    "tags": [],
                    "rulename": "test",
                    "revision": 0
                }
            ],
            "alerturi": "f9zsksax==",
            "ppid": 142,
            "metaclass": "ids",
            "eventid": "101",
            "eventtime": "2019-09-13T06:51:59.000Z",
            "iocnames": "tactic",
            "md5values": [
                "md5"
            ],
            "uri_parsed": "uri",
            "args": "c:\\windows\\system32\\net1",
            "detect_ruleids": [
                "99"
            ],
            "agentdetails": {
                "containmentState": "normal",
                "appStarted": "2019-09-10t05:41:17z",
                "regOwner": "george",
                "ProRemSvcStatus": "running",
                "ProcessTrackerStatus": "disabled",
                "configId": "sljlx==",
                "timezone": "",
                "productID": "00311",
                "totalphysical": "170053200",
                "ExdPluginStatus": "running",
                "uptime": "pt3514s",
                "installDate": "2019-07-08t13:28:00z",
                "MalwareProtectionStatus": "running",
                "@created": "2019-09-13t06:22:12z",
                "KernelServices": {
                    "Status": "loaded"
                },
                "procConfigInfo": {
                    "lpcDevice": "intel",
                    "iommu": "enabled",
                    "virtualization": "enabled",
                    "vmGuest": "no"
                },
                "appVersion": "30.0",
                "machine": "desktop",
                "platform": "win",
                "configChannel": "6430f3d0aea8",
                "stateAgentStatus": "ok",
                "intelVersion": "101",
                "biosInfo": {
                    "biosVersion": "dell inc.",
                    "biosDate": "05/09/2009",
                    "biosType": "uefi"
                },
                "appCreated": "2019-07-21t16:00:05z",
                "networkArray": {
                    "networkInfo": [
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "MAC": "MAC",
                            "adapter": "{adapter}",
                            "description": "pangp virtual #2"
                        },
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "subnetMask": "255.255.0.0",
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "MAC": "mac",
                            "adapter": "{}",
                            "description": "npcap loopback adapter"
                        },
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "subnetMask": "255.255.255.0",
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "MAC": "mac",
                            "adapter": "{}",
                            "description": "virtualbox host"
                        },
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "MAC": "mac",
                            "adapter": "{}",
                            "description": "microsoft wi-fi"
                        },
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "MAC": "mac",
                            "adapter": "{}",
                            "description": "microsoft wi-fi"
                        },
                        {
                            "dhcpLeaseObtained": "2019-09-13t06:50:36z",
                            "description": "vmware virtual ethernet",
                            "adapter": "{}",
                            "MAC": "mac",
                            "dhcpServerArray": {
                                "dhcpServer": [
                                    "192.168.0.1"
                                ]
                            },
                            "dhcpLeaseExpires": "2019-09-13t07:23:36z",
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "subnetMask": "255.255.255.0",
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            }
                        },
                        {
                            "dhcpLeaseObtained": "2019-09-11t11:18:59z",
                            "ipGatewayArray": {
                                "ipGateway": [
                                    "192.168.0.1"
                                ]
                            },
                            "description": "intel(r) dual band",
                            "adapter": "{}",
                            "MAC": "mac",
                            "dhcpServerArray": {
                                "dhcpServer": [
                                    "192.168.0.1"
                                ]
                            },
                            "dhcpLeaseExpires": "2019-01-19t16:18:59z",
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "subnetMask": "255.255.255.0",
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            }
                        },
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "MAC": "mac",
                            "adapter": "{}",
                            "description": "bluetooth device"
                        },
                        {
                            "ipArray": {
                                "ipInfo": [
                                    {
                                        "ipv6Address": "1:1:1:1"
                                    },
                                    {
                                        "ipAddress": "192.168.0.1"
                                    }
                                ]
                            },
                            "adapter": "{}",
                            "description": "software loopback interface 1"
                        }
                    ]
                },
                "drives": "c:,g:",
                "intelTimestamp": "2019-01-12t06:51:20z",
                "malware": {
                    "mg": {
                        "engine": {
                            "version": "30.19"
                        },
                        "content": {
                            "updated": "2019-01-16t06:12:55z",
                            "version": "14"
                        }
                    },
                    "UserFPExclusionsContentVersion": "0.0.0",
                    "DTIExclusionsContentVersion": "1.13.5",
                    "UserFPExclusionsSchemaVersion": "1.0.0",
                    "version": "30.17.0",
                    "QuarantineStatus": "cleanenabled",
                    "av": {
                        "engine": {
                            "version": "11.0"
                        },
                        "content": {
                            "updated": "2019-09-11t04:52:56z",
                            "version": "7"
                        }
                    },
                    "config": {
                        "mg": {
                            "status": "enabled",
                            "quarantine": {
                                "status": "enabled"
                            }
                        },
                        "av": {
                            "status": "enabled",
                            "quarantine": {
                                "status": "cleanenabled"
                            }
                        }
                    },
                    "DTIExclusionsSchemaVersion": "1.0.0"
                },
                "buildNumber": "18",
                "FIPS": "disabled",
                "user": "system",
                "date": "2019-09-13T06:52:57.000Z",
                "productName": "windows 10 home",
                "gmtoffset": "+p",
                "intelETag": "v1",
                "ExdPlugin": {
                    "engine": {
                        "version": "300"
                    },
                    "content-rules": {
                        "version": "3.6"
                    },
                    "content-whitelist": {
                        "version": "1.6"
                    },
                    "version": "30.6"
                },
                "OSbitness": "64-bit",
                "procType": "multiprocessor free",
                "primaryIpv4Address": "192.168.0.1",
                "timezoneDST": "",
                "EventorStatus": "running",
                "availphysical": "4666",
                "timezoneStandard": "",
                "configETag": "v1/156",
                "directory": "c:\\windows\\system32",
                "processor": "intel(r) core(tm)",
                "clockSkew": "+pts"
            },
            "meta_deviceid": "",
            "agentdomain": "workgroup",
            "pprocess": "net.exe",
            "is_false_positive": False,
            "class": "fireeye_hx_alert",
            "agentos": "windows 10 home 18362",
            "md5": "md5",
            "agentmac": "mac",
            "__metadata__": {
                "raw_batch_id": "ed5b3525b0c4",
                "data_type": "passthrough",
                "disable_index": False,
                "dynamic_taxonomy": True,
                "num_events": 1,
                "source_type": "json",
                "target_index": "alerts",
                "batch_id": "ee7d3ebbed5b3525b0c4",
                "customer_id": "",
                "id": "9-09-12",
                "sequence_number": 0
            },
            "agentloggedonusers": "font driver host",
            "conditionid": "jjvnefleq==",
            "uri": "",
            "detect_rulenames": [
                "fireeye hx [ioc process event]",
                "test"
            ],
            "agentip": "192.168.0.1",
            "subtype": "None",
            "deviceid": "759c",
            "starttime": "2019-09-13T06:51:59.276Z",
            "agentid": "hcldmjf9zfmwxov9",
            "agenthostname": "dm1ps9",
            "meta_agentid": "hczFMWXOV9",
            "event_values": {
                "processEvent/processCmdLine": "c:\\windows\\system32\\net1",
                "processEvent/parentPid": 14,
                "processEvent/md5": "md5",
                "processEvent/processPath": "c:\\windows\\system32\\net1",
                "processEvent/parentProcess": "net",
                "processEvent/timestamp": "2019-09-13t06:51:59.276z",
                "processEvent/startTime": "2019-09-13t06:51:59.276z",
                "processEvent/process": "net1.exe",
                "processEvent/username": "desktop-54m",
                "processEvent/pid": 400,
                "processEvent/parentProcessPath": "c:\\windows\\system32\\net.exe",
                "processEvent/eventType": "start"
            }
        }
    ]
}
# Mock response for listing the notes on an alert/case: "meta" paging
# envelope plus two note objects in "results".
NOTES_GET_RESP = {
    "meta": {
        "count": 2,
        "previous": None,
        "limit": 30,
        "offset": 0,
        "next": None
    },
    "results": [
        {
            "created_by": {
                "id": "a",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "george@demisto.com"
            },
            "created_at": "2019-10-28T07:41:30.396000Z",
            "id": 9,
            "updated_at": "2019-10-28T07:41:42.000123Z",
            "note": "This is a note test"
        },
        {
            "created_by": {
                "id": "a",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "george@demisto.com"
            },
            "created_at": "2019-10-24T13:52:19.021299Z",
            "id": 91,
            "updated_at": "2019-10-24T13:52:19.021399Z",
            "note": "What a great note this is"
        }
    ]
}
# Mock response for creating a single note (same shape as one entry of
# NOTES_GET_RESP["results"]). Built with the dict() constructor since every
# key is a valid identifier.
NOTES_CREATE_RESP = dict(
    created_by=dict(
        id="a",
        avatar="avatar",
        name="George",
        username="george@demisto.com",
        primary_email="george@demisto.com",
    ),
    created_at="2019-10-28T07:41:30.396000Z",
    id=9,
    updated_at="2019-10-28T07:41:42.000123Z",
    note="This is a note test",
)
# Mock response for fetching a single list item. Built with the dict()
# constructor since every key is a valid identifier.
LIST_SINGLE_ITEM_RESP = dict(
    id=163,
    value="aTest list",
    type="misc",
    risk="Medium",
    notes="test ok",
    list=3232,
)
# Mock paginated response for listing list items: one result entry with the
# same field set as LIST_SINGLE_ITEM_RESP (different values).
LIST_ITEMS_RESP = dict(
    meta=dict(
        count=1,
        previous=None,
        limit=30,
        offset=0,
        next=None,
    ),
    results=[
        dict(
            id=163,
            value="Test list",
            type="misc",
            risk="Low",
            notes="",
            list=3232,
        ),
    ],
)
# Mock search response with multiple hits. The shape mirrors an
# Elasticsearch-style search result ("hits", "aggregations", "_shards",
# "took") wrapped with the query DSL ("dsl"), the MQL query string ("mql"),
# and execution options/metrics.
SEARCH_MULTI_RESP = {
    "dsl": {
        "from": 0,
        "aggs": {
            "groupby:subject": {
                "meta": {
                    "field": "subject",
                    "type": "groupby"
                },
                "terms": {
                    "field": "subject.raw",
                    "order": {
                        "_count": "desc"
                    },
                    "min_doc_count": 1,
                    "size": 50
                }
            }
        },
        "terminate_after": 1,
        "directives": {
            "scroll_id": "",
            "page_size": 2,
            "start": "2019-10-28T08:00:00.000Z",
            "highlight_terms": [],
            "limit": 1,
            "timeout": 120000,
            "offset": 0,
            "indices": [
                "events",
                "alerts",
                "appliance_health"
            ],
            "end": "2019-10-29T08:36:16.947Z",
            "search_customer_ids": [
                "demisto"
            ],
            "customer_id": "demisto",
            "scroll": False
        },
        "timeout": "120000ms",
        "query": {
            "bool": {
                "filter": [
                    {
                        "range": {
                            "meta_ts": {
                                "gte": "2019-10-28T08:00:00.000Z",
                                "lte": "2019-10-29T08:36:16.947Z"
                            }
                        }
                    },
                    {
                        "common": {
                            "domain": {
                                "cutoff_frequency": 0.001,
                                "query": "google.com",
                                "high_freq_operator": "and",
                                "low_freq_operator": "and"
                            }
                        }
                    }
                ]
            }
        },
        "size": 2
    },
    "highlight_terms": None,
    "options": {
        "disable_regex": False,
        "default_timestamp": "meta_ts",
        "analyzer_impl": "legacy",
        "indices": [
            "events",
            "alerts",
            "appliance_health"
        ],
        "quick_mode": True,
        "filters": [],
        "offset": 0,
        "default_field": "rawmsg",
        "use_terminate_after": True,
        "scroll": False,
        "page_size": 10,
        "groupby": {
            "threshold": 1,
            "separator": "|%$,$%|",
            "size": 50
        },
        "search_customer_ids": [
            "demisto"
        ],
        "limit": -1,
        "list_type": "indicator",
        "es6_compatible": True,
        "use_limit_filters": False,
        "customer_id": "demisto",
        "script_impl": "native"
    },
    "mql": "domain:google.com and meta_ts>=2019-10-25T09:07:43.810Z {page_size:2 offset:1 limit:1} | groupby subject sep=`|%$,$%|`", # noqa: E501
    "results": {
        "hits": {
            "hits": [
                {
                    "_score": 0.0,
                    "_type": "event",
                    "_id": "demisto",
                    "_source": {
                        "status": "delivered",
                        "domain": "mx.google.com",
                        "_eventid": "demisto",
                        "rawmsg": "raw_msg",
                        "meta_cbname": "helix-etp_stats",
                        "srcipv4": "8.8.8.8",
                        "meta_ts": "2019-10-28T10:49:27.210Z",
                        "srclongitude": -122.0785140991211,
                        "size": "21.23",
                        "srccountry": "united states",
                        "eventtype": "trace",
                        "srccity": "mountain view",
                        "to": "demisto@demisto.com",
                        "srclatitude": 37.40599060058594,
                        "subject": "google",
                        "metaclass": "email",
                        "eventid": "demisto",
                        "inreplyto": "demisto",
                        "eventtime": "2019-10-28T10:43:11.000Z",
                        "srcregion": "california",
                        "meta_oml": 1036,
                        "class": "fireeye_etp",
                        "mailfrom": "de@demisto.com",
                        "rawmsghostname": "helix-etp_stats-demisto-etp_stats",
                        "__metadata__": {
                            "raw_batch_id": "demisto",
                            "data_type": "passthrough",
                            "disable_index": False,
                            "dynamic_taxonomy": False,
                            "num_events": 1,
                            "source_type": "json",
                            "target_index": "",
                            "batch_id": "demisto",
                            "customer_id": "demisto",
                            "id": "demisto",
                            "sequence_number": 0
                        },
                        "srcdomain": "google.com",
                        "srcisp": "google llc",
                        "srcusagetype": "dch",
                        "srccountrycode": "us",
                        "meta_rts": "2019-10-28T10:49:27.000Z",
                        "meta_cbid": 99999
                    },
                    "_index": "2019-10-28t00:00:00.000z"
                },
                {
                    "_score": 0.0,
                    "_type": "event",
                    "_id": "demisto",
                    "_source": {
                        "status": "delivered",
                        "domain": "gmr-mx.google.com",
                        "_eventid": "demisto",
                        "rawmsg": "demisto",
                        "meta_cbname": "helix-etp_stats",
                        "srcipv4": "8.8.8.8",
                        "meta_ts": "2019-10-29T05:13:24.009Z",
                        "srclongitude": -122.0785140991211,
                        "size": "315.29",
                        "srccountry": "united states",
                        "eventtype": "trace",
                        "srccity": "mountain view",
                        "to": "demisto@demisto.com",
                        "srclatitude": 37.40599060058594,
                        "subject": "Demisto subj",
                        "metaclass": "email",
                        "eventid": "demisto",
                        "inreplyto": "demisto@demisto.com",
                        "eventtime": "2019-10-29T05:08:39.000Z",
                        "srcregion": "california",
                        "meta_oml": 1178,
                        "class": "fireeye_etp",
                        "mailfrom": "dem@demisto.com",
                        "rawmsghostname": "helix-etp_stats-demisto-etp_stats",
                        "__metadata__": {
                            "raw_batch_id": "demisto",
                            "data_type": "passthrough",
                            "disable_index": False,
                            "dynamic_taxonomy": False,
                            "num_events": 4,
                            "source_type": "json",
                            "target_index": "",
                            "batch_id": "demisto",
                            "customer_id": "demisto",
                            "id": "demisto",
                            "sequence_number": 1
                        },
                        "srcdomain": "google.com",
                        "srcisp": "google llc",
                        "srcusagetype": "dch",
                        "srccountrycode": "us",
                        "meta_rts": "2019-10-29T05:13:24.000Z",
                        "meta_cbid": 99999
                    },
                    "_index": "2019-10-29t00:00:00.000z"
                }
            ],
            "total": 11,
            "max_score": 0.0
        },
        "_shards": {
            "successful": 66,
            "failed": 0,
            "total": 66
        },
        "took": 3046,
        "aggregations": {
            "groupby:subject": {
                "buckets": [
                    {
                        "key": "google alert - gold",
                        "doc_count": 3
                    },
                    {
                        "key": "accepted: meeting",
                        "doc_count": 1
                    },
                    {
                        "key": "invitation: Declined",
                        "doc_count": 1
                    }
                ],
                "meta": {
                    "field": "subject",
                    "type": "groupby"
                },
                "sum_other_doc_count": 0,
                "doc_count_error_upper_bound": 0
            }
        },
        "metrics": {
            "load": 2.8539999999999996,
            "regex": False,
            "list": False,
            "aggregation": True,
            "subsearch": False
        },
        "terminated_early": True,
        "timed_out": False,
        "failures": []
    }
}
# Mock response for listing archived searches. Note the envelope differs from
# the other fixtures: "meta" uses "totalCount" (not "count") and the payload
# key is "data" (not "results").
SEARCH_ARCHIVE_RESP = {
    "meta": {
        "totalCount": 2,
        "limit": 30,
        "offset": 0
    },
    "data": [
        {
            "_createdBy": {
                "id": "demisto",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "demisto@demisto.com"
            },
            "_updatedBy": {
                "id": "demisto",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "demisto@demisto.com"
            },
            "completeAfterCount": 0,
            "completeAfterDuration": 0,
            "createDate": "2019-10-09T11:19:38.253848Z",
            "customer_id": "demisto",
            "emailNotify": False,
            "errors": [],
            "id": "82",
            "is_part_of_report": False,
            "name": "",
            "numResults": 457,
            "percentComplete": 100.0,
            "query": "domain:[google,com] | groupby eventtype",
            "queryAST": "{}",
            "searchEndDate": "2019-10-09T11:19:00Z",
            "searchStartDate": "2019-10-09T11:19:00Z",
            "sourceBucket": "",
            "state": "completed",
            "timeRemaining": 0.0,
            "updateDate": "2019-10-09T11:19:00.686503Z"
        },
        {
            "_createdBy": {
                "id": "demisto",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "demisto@demisto.com"
            },
            "_updatedBy": {
                "id": "demisto",
                "avatar": "avatar",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "demisto@demisto.com"
            },
            "completeAfterCount": 0,
            "completeAfterDuration": 0,
            "createDate": "2019-10-09T11:18:52.250000Z",
            "customer_id": "demisto",
            "emailNotify": False,
            "errors": [],
            "id": "83",
            "is_part_of_report": False,
            "name": "",
            "numResults": 20,
            "percentComplete": 100.0,
            "query": "domain:[google] | groupby eventtype",
            "queryAST": "{}",
            "searchEndDate": "2019-10-09T11:18:28Z",
            "searchStartDate": "2019-10-09T11:18:28Z",
            "sourceBucket": "",
            "state": "completed",
            "timeRemaining": 0.0,
            "updateDate": "2019-10-09T11:19:21.916006Z"
        }
    ]
}
# Mock aggregation payload for a single groupby field ("subject"). The four
# buckets follow a regular "Test N" / doc_count=N pattern, so they are
# generated with a comprehension instead of being spelled out literally.
SEARCH_AGGREGATIONS_SINGLE_RESP = {
    "groupby:subject": {
        "buckets": [
            {"key": "Test {}".format(n), "doc_count": n} for n in range(1, 5)
        ],
        "meta": {
            "field": "subject",
            "type": "groupby",
        },
    },
}
# Mock response for fetching the results of a completed archive search:
# "data" holds the archived-search record (same shape as SEARCH_ARCHIVE_RESP
# entries) and "results" holds the Elasticsearch-style payload (dsl/mql/hits/
# aggregations) for that search.
SEARCH_ARCHIVE_RESULTS_RESP = {
    "data": [
        {
            "_createdBy": {
                "id": "demisto",
                "avatar": "demisto",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "george@demisto.com"
            },
            "_updatedBy": {
                "id": "demisto",
                "avatar": "demisto",
                "name": "George",
                "username": "george@demisto.com",
                "primary_email": "george@demisto.com"
            },
            "completeAfterCount": 0,
            "completeAfterDuration": 0,
            "createDate": "2019-10-06T11:18:38.253848Z",
            "customer_id": "demisto",
            "emailNotify": False,
            "_errors": [],
            "errors": [],
            "id": "82",
            "is_part_of_report": False,
            "name": "",
            "numResults": 457,
            "percentComplete": 100.0,
            "query": "domain:[google,com] | groupby eventtype",
            "queryAST": "{}",
            "searchEndDate": "2019-10-06T11:18:28Z",
            "searchStartDate": "2019-10-05T11:18:28Z",
            "sourceBucket": "",
            "state": "completed",
            "timeRemaining": 0.0,
            "updateDate": "2019-10-06T11:18:54.686503Z"
        }
    ],
    "results": {
        "dsl": {
            "from": 0,
            "aggs": {
                "groupby:eventtype": {
                    "meta": {
                        "field": "eventtype",
                        "type": "groupby"
                    },
                    "terms": {
                        "field": "eventtype",
                        "order": {
                            "_count": "desc"
                        },
                        "min_doc_count": 1,
                        "size": 50
                    }
                }
            },
            "terminate_after": -1,
            "directives": {
                "scroll_id": "",
                "page_size": 10,
                "start": "2019-10-28T15:00:00.000Z",
                "highlight_terms": [],
                "limit": -1,
                "timeout": 120000,
                "offset": 0,
                "indices": [
                    "events",
                    "alerts",
                    "appliance_health"
                ],
                "end": "2019-10-29T15:40:48.571Z",
                "search_customer_ids": None,
                "customer_id": "",
                "scroll": False
            },
            "timeout": "120000ms",
            "query": {
                "bool": {
                    "filter": [
                        {
                            "range": {
                                "meta_ts": {
                                    "gte": "2019-10-28T15:00:00.000Z",
                                    "lte": "2019-10-29T15:40:48.571Z"
                                }
                            }
                        }
                    ],
                    "minimum_should_match": 1,
                    "should": [
                        {
                            "common": {
                                "domain": {
                                    "cutoff_frequency": 0.001,
                                    "query": "google",
                                    "high_freq_operator": "and",
                                    "low_freq_operator": "and"
                                }
                            }
                        },
                        {
                            "common": {
                                "domain": {
                                    "cutoff_frequency": 0.001,
                                    "query": "com",
                                    "high_freq_operator": "and",
                                    "low_freq_operator": "and"
                                }
                            }
                        }
                    ]
                }
            },
            "size": 10
        },
        "mql": "domain:[google,com] | groupby eventtype sep=`|%$,$%|`",
        "results": {
            "hits": {
                "stored": 457,
                "hits": [
                    {
                        "_type": "event",
                        "_id": "demisto",
                        "_source": {
                            "status": "delivered",
                            "domain": "domain.com",
                            "_eventid": "demsito",
                            "rawmsg": "{}",
                            "meta_cbname": "helix-etp",
                            "srcipv4": "8.8.8.8",
                            "meta_ts": "2019-10-06T10:55:26.103Z",
                            "srclongitude": -0.1257400,
                            "size": "40.04",
                            "srccountry": "",
                            "eventtype": "trace",
                            "srccity": "london",
                            "to": "demisto@demisto.com",
                            "srclatitude": 51.8594,
                            "subject": "dictation users",
                            "metaclass": "email",
                            "eventid": "evenid",
                            "inreplyto": "squidward <squidward@demisto.com>",
                            "eventtime": "2019-10-06T10:48:13.000Z",
                            "srcregion": "",
                            "meta_oml": 908,
                            "class": "fireeye_etp",
                            "mailfrom": "squidward@demisto.com",
                            "rawmsghostname": "helix-etp_stats-etp_stats",
                            "__metadata__": {
                                "raw_batch_id": "",
                                "data_type": "passthrough",
                                "disable_index": False,
                                "dynamic_taxonomy": False,
                                "num_events": 10,
                                "source_type": "json",
                                "target_index": "",
                                "batch_id": "",
                                "customer_id": "",
                                "id": "",
                                "sequence_number": 1
                            },
                            "srcdomain": "",
                            "srcisp": "",
                            "srcusagetype": "",
                            "srccountrycode": "",
                            "meta_rts": "2019-10-06T10:55:26.000Z",
                            "meta_cbid": 99999
                        },
                        "_index": "archive"
                    },
                    {
                        "_type": "event",
                        "_id": "demisto",
                        "_source": {
                            "status": "delivered",
                            "domain": "demisto.com",
                            "_eventid": "",
                            "rawmsg": "{}",
                            "meta_cbname": "helix-etp_stats",
                            "srcipv4": "8.8.8.8",
                            "meta_ts": "2019-10-06T11:09:25.946Z",
                            "srclongitude": -75.19625,
                            "size": "10.75",
                            "srccountry": "",
                            "eventtype": "trace",
                            "srccity": "cha",
                            "to": "squidward@demisto.com",
                            "srclatitude": 40.282958,
                            "subject": "meet world",
                            "metaclass": "email",
                            "eventid": "demisto",
                            "inreplyto": "\"squidward\" <fsquidward@demisto.com>",
                            "eventtime": "2019-10-06T11:02:01.000Z",
                            "srcregion": "penn",
                            "meta_oml": 1160,
                            "class": "fireeye_etp",
                            "mailfrom": "squidward@demisto.com",
                            "rawmsghostname": "helix-etp_stats-etp_stats",
                            "__metadata__": {
                                "raw_batch_id": "demisto",
                                "data_type": "passthrough",
                                "disable_index": False,
                                "dynamic_taxonomy": False,
                                "num_events": 5,
                                "source_type": "json",
                                "target_index": "",
                                "batch_id": "",
                                "customer_id": "",
                                "id": "",
                                "sequence_number": 0
                            },
                            "srcdomain": "squidward.com",
                            "srcisp": "squidward",
                            "srcusagetype": "com",
                            "srccountrycode": "us",
                            "meta_rts": "2019-10-06T11:09:25.000Z",
                            "meta_cbid": 99999
                        },
                        "_index": "archive"
                    },
                    {
                        "_type": "event",
                        "_id": "demisto",
                        "_source": {
                            "status": "delivered",
                            "domain": "demisto.com",
                            "_eventid": "demiostop",
                            "rawmsg": "{}",
                            "meta_cbname": "helix-etp_stats",
                            "srcipv4": "8.8.8.8",
                            "meta_ts": "2019-10-06T11:09:25.946Z",
                            "srclongitude": -93.119,
                            "size": "26.92",
                            "srccountry": "united states",
                            "eventtype": "trace",
                            "srccity": "",
                            "to": "squidward@demisto.com",
                            "srclatitude": 33.50,
                            "subject": "fw: reminder",
                            "metaclass": "email",
                            "eventid": "dwasdkffv",
                            "inreplyto": "squidward <squidward@demisto.com>",
                            "eventtime": "2019-10-06T11:02:18.000Z",
                            "srcregion": "lo",
                            "meta_oml": 1065,
                            "class": "fireeye_etp",
                            "mailfrom": "squidward@demisto.com",
                            "rawmsghostname": "helix-etp_etp_stats",
                            "__metadata__": {
                                "raw_batch_id": "sdfdsfdsdfvbvd",
                                "data_type": "passthrough",
                                "disable_index": False,
                                "dynamic_taxonomy": False,
                                "num_events": 5,
                                "source_type": "json",
                                "target_index": "",
                                "batch_id": "afasvjbjhsde4",
                                "customer_id": "",
                                "id": "outg85cgj5",
                                "sequence_number": 1
                            },
                            "srcdomain": "demisto.com",
                            "srcisp": "demistos",
                            "srcusagetype": "dch",
                            "srccountrycode": "us",
                            "meta_rts": "2019-10-06T11:09:25.000Z",
                            "meta_cbid": 99999
                        },
                        "_index": "archive"
                    },
                    {
                        "_type": "event",
                        "_id": "squidsdaasfwardsasd",
                        "_source": {
                            "status": "delivered",
                            "domain": "demisto.com",
                            "_eventid": "jjdpse3",
                            "rawmsg": "{}",
                            "meta_cbname": "helix-etp_stats",
                            "srcipv4": "8.8.8.8",
                            "meta_ts": "2019-10-06T11:09:27.091Z",
                            "srclongitude": -84.377,
                            "size": "16.46",
                            "srccountry": "united states",
                            "eventtype": "trace",
                            "srccity": "at",
                            "to": "squidward@demisto.com",
                            "srclatitude": 33.770843,
                            "subject": "magic link",
                            "metaclass": "email",
                            "eventid": "93730",
                            "inreplyto": "geroge <hello@demisto.com>",
                            "eventtime": "2019-10-06T11:03:00.000Z",
                            "srcregion": "georga",
                            "meta_oml": 1100,
                            "class": "fireeye_etp",
                            "mailfrom": "squidward@demisto.com",
                            "rawmsghostname": "helix-etp_s",
                            "__metadata__": {
                                "raw_batch_id": "ssas7",
                                "data_type": "passthrough",
                                "disable_index": False,
                                "dynamic_taxonomy": False,
                                "num_events": 5,
                                "source_type": "json",
                                "target_index": "",
                                "batch_id": "94gfjs83",
                                "customer_id": "",
                                "id": "skdjf8723d",
                                "sequence_number": 2
                            },
                            "srcdomain": "demisto.com",
                            "srcisp": "the demisto group",
                            "srcusagetype": "com",
                            "srccountrycode": "us",
                            "meta_rts": "2019-10-06T11:09:27.000Z",
                            "meta_cbid": 99999
                        },
                        "_index": "archive"
                    }
                ],
                "total": 457
            },
            "aggregations": {
                "groupby:eventtype": {
                    "limited": False,
                    "buckets": [
                        {
                            "key": "trace",
                            "doc_count": 452
                        },
                        {
                            "key": "dnslookupevent",
                            "doc_count": 5
                        }
                    ],
                    "doc_count_error_upper_bound": 0,
                    "sum_other_doc_count": 0
                }
            },
            "took": 4605
        }
    }
}
# Mock FireEye Helix "list rules" API response used by the integration's unit tests.
RULE_RESP = {
    "rules": [
        {
            "customer_id": "demisto",
            "id": "1.1.1",
            "_rulePack": "1.1.1",
            "assertions": [],
            "assertionsCount": 0,
            "alertType": "demisto",
            "dependencies": [],
            "dependenciesCount": 0,
            "description": "demisto",
            "internal": True,
            "deleted": False,
            "enabled": True,
            "supported": False,
            "createDate": "2019-03-30T19:25:00.11113Z",
            "_createdBy": {
                "id": "demisto",
                "avatar": "avatar",
                "name": "Demisto",
                "username": "demisto",
                "primary_email": "demisto@demisto.com"
            },
            "updateDate": "2019-10-30T20:07:27.330083Z",
            "_updatedBy": {
                "id": "demisto",
                "avatar": "avatar",
                "name": "Demisto",
                "username": "demisto",
                "primary_email": "demisto@demisto.com"
            },
            "classification": 40,
            "confidence": "Medium",
            "disabledReason": "",
            "distinguishers": [
                "srcipv4",
                "srcipv6",
                "category"
            ],
            "eventsThreshold": 1,
            "hash": "demisto",
            "infoLinks": [],
            "isTuned": False,
            "protected": False,
            "killChain": [
                "6 - C2"
            ],
            "message": "demisto",
            "output": [
                "alert"
            ],
            "playbooks": [],
            "queues": [
                "Default Queue"
            ],
            "risk": "Medium",
            "search": "demisto",
            "searches": [
                {
                    "header": "demisto",
                    "category": "",
                    "search": "demisto",
                    "relativeTime": 860
                },
                {
                    "header": "demisto",
                    "category": "",
                    "search": "class=demisto msg=<%=msg%> | groupby [srcipv4]",
                    "relativeTime": 864
                }
            ],
            "secondsThreshold": 60,
            "severity": "Medium",
            "sourceRevision": 0,
            "tags": [
                "demisto",
                "malware",
                "http",
                "md-info"
            ],
            "threatType": 5,
            "type": "alert",
            "tuningEventsThreshold": 0,
            "tuningSearch": "",
            "tuningSecondsThreshold": 0,
            "revisions": [
                {
                    "enabled": True,
                    "_updatedBy": {
                        "id": "demisto",
                        "avatar": "avatar",
                        "name": "Demisto",
                        "username": "demisto",
                        "primary_email": "demisto@demisto.com"
                    },
                    # NOTE(review): hour "30" is not a valid time of day --
                    # confirm whether this malformed timestamp is intentional
                    # mock data or a typo for e.g. "20:07:27".
                    "updateDate": "2019-10-29T30:07:27.380007Z"
                },
                {
                    "enabled": False,
                    "_updatedBy": {
                        "id": "demisto",
                        "avatar": "avatar",
                        "name": "Demisto",
                        "username": "demisto",
                        "primary_email": "demisto@demisto.com"
                    },
                    "updateDate": "2019-10-29T23:07:14.560140Z"
                },
                {
                    "updateDate": "2019-08-19T23:38:19.518212Z",
                    "_updatedBy": {
                        "id": "demisto",
                        "avatar": "avatar",
                        "name": "Demisto",
                        "username": "demisto",
                        "primary_email": "demisto@demisto.com"
                    },
                    # Unlike the top-level "distinguishers" list above, this
                    # revision stores the value as a JSON-encoded string.
                    "distinguishers": "[\"srcipv4\", \"srcipv6\", \"category\"]"
                }
            ],
            "revision": 3
        }
    ],
    # Pagination envelope returned by the Helix API.
    "meta": {
        "count": 2,
        "previous": None,
        "offset": 1,
        "limit": 30,
        "next": None
    }
}
# Mock multi-field "groupby" aggregation response. Each bucket key joins the
# grouped field values (listed in meta.fields) with the "|%$,$%|" separator.
SEARCH_AGGREGATIONS_MULTI_RESP = {
    "groupby:srcipv4_to_subject": {
        "buckets": [
            {
                "key": "192.168.0.1|%$,$%|test1@demisto.com|%$,$%|accepted",
                "doc_count": 1
            },
            {
                "key": "192.168.0.2|%$,$%|test2@demisto.com|%$,$%|resume",
                "doc_count": 2
            },
            {
                "key": "192.168.0.3|%$,$%|test3@demisto.com|%$,$%|position",
                "doc_count": 3
            }
        ],
        "meta": {
            "fields": [
                "srcipv4",
                "to",
                "subject"
            ],
            "type": "multi_groupby"
        }
    }
}
| {
"repo_name": "demisto/content",
"path": "Packs/FireEyeHelix/Integrations/FireEyeHelix/test_data/response_constants.py",
"copies": "1",
"size": "66486",
"license": "mit",
"hash": 4443847686036751000,
"line_mean": 34.9189627229,
"line_max": 146,
"alpha_frac": 0.3252414042,
"autogenerated": false,
"ratio": 4.460950080515298,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5286191484715298,
"avg_score": null,
"num_lines": null
} |
"""alert slack with any auditor"""
import json
import requests
from security_monkey import app
from security_monkey.alerters import custom_alerter
class SlackAlerter(object, metaclass=custom_alerter.AlerterType):
    """Alerter that posts newly confirmed audit issues to a Slack webhook.

    Configuration is read from the Flask app config: ``SLACK_HOOK`` (the
    webhook URL; when unset the alerter keeps an empty config and hook),
    ``SLACK_CHANNEL``, ``SLACK_USERNAME`` and ``SLACK_ICON``.
    """

    def __init__(self, cls, name, bases, attrs):
        # NOTE(review): object.__init__ accepts no extra positional arguments,
        # so forwarding (cls, name, bases, attrs) looks like it would raise
        # TypeError at instantiation unless custom_alerter.AlerterType rewires
        # the MRO -- confirm against the AlerterType implementation.
        super().__init__(cls, name, bases, attrs)
        self.slack_config = {}
        self.slack_hook = ""
        if app.config.get('SLACK_HOOK'):
            self.slack_config = {
                'channel': app.config.get('SLACK_CHANNEL'),
                'username': app.config.get('SLACK_USERNAME'),
                'icon_emoji': app.config.get('SLACK_ICON'),
            }
            self.slack_hook = app.config.get('SLACK_HOOK')

    def report_auditor_changes(self, auditor):
        """Post one Slack message per confirmed new issue found by *auditor*."""
        for item in auditor.items:
            for issue in item.confirmed_new_issues:
                attachment = {
                    'text': "New Issue: Index: {!s}\n Account: {!s}\n Region: {!s}\n Name: {!s}".format(
                        item.index, item.account, item.region, item.name)
                }
                self.slack_config['attachments'] = attachment
                try:
                    requests.post(self.slack_hook, data=json.dumps(self.slack_config))
                    self.slack_config['attachments'] = []
                except Exception as e:
                    app.logger.exception(e)
                    # Bug fix: the original concatenated the exception object
                    # itself onto the message ("..." + e), which raises
                    # TypeError inside the handler; str(e) is well-formed.
                    app.logger.error("something has gone wrong with the slack post. "
                                     "Please check your configuration. " + str(e))
| {
"repo_name": "Netflix/security_monkey",
"path": "security_monkey/alerters/slack_alerter.py",
"copies": "1",
"size": "1467",
"license": "apache-2.0",
"hash": -1053776179268034700,
"line_mean": 34.7804878049,
"line_max": 153,
"alpha_frac": 0.5610088616,
"autogenerated": false,
"ratio": 3.964864864864865,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5025873726464865,
"avg_score": null,
"num_lines": null
} |
'''Alerts module: Triggers a outbound alert to a 3rd party API when a
known sound has been detected.
'''
import requests
import datetime
import os
import json
class AlertSender(object):
    '''Class to handle the sending of alerts from the audio analysis, via HTTP, to the API.'''

    def __init__(self, logger, profile):
        self.logger = logger    # application logger
        self.profile = profile  # analysis profile; carries the sound's meta_data and fingerprints
        self.sent = 0           # count of successful alert POSTs

    def push_alerts(self):
        '''If alerts array passed in has sound(s) of a KNOWN type,
        trigger alert to both 3rd party and RFCx internal API.
        '''
        guardian_id = self.profile.guardian_id
        checkin_id = self.profile.spectrum.sound.meta_data["checkin_id"]
        start_time = self.profile.spectrum.sound.meta_data["start_time"]
        audio_id = self.profile.spectrum.sound.meta_data['audio_id']
        api_alert_url = os.environ["ALERT_API_DOMAIN_URL"] + "/v1/guardians/" + guardian_id \
            + "/checkins/" + checkin_id + "/audio/" + audio_id + "/events"
        json_alerts = []
        if not self.profile.interest_fingerprints:
            self.logger.info('No alerts to send for file: %s' % (audio_id))
        # Build one alert record per fingerprint of interest.
        for event in self.profile.interest_fingerprints:
            snd_class = event['classification']
            incident_time = event['event_timestamp']
            json_alerts.append(
                {
                    "guardian_id": guardian_id,
                    "checkin_id": checkin_id,
                    "audio_id": audio_id,
                    "alert_send_time": str(datetime.datetime.now()),
                    "recording_start": start_time,
                    "incident_time": incident_time,
                    "incident_duration": event['time_interval'][1] - event['time_interval'][0],
                    "lat_lng": self.profile.spectrum.sound.meta_data['lat_lng'],
                    "snd_classification": snd_class,
                }
            )
        # send up all alert JSON data
        try:
            # NOTE: posting the json as a file is really not a good way to do this.
            # need to eventually this as a string parameter, and update API accordingly
            api_alert_token_guid = self.profile.spectrum.sound.meta_data["token_guid"]
            api_alert_token = self.profile.spectrum.sound.meta_data["token"]
            api_alert_headers = {"x-auth-user": "token/" + api_alert_token_guid, "x-auth-token": api_alert_token}
            api_alert_json_post = {'json': ('alerts.json', json.dumps(json_alerts))}
            api_alert_req = requests.post(api_alert_url, files=api_alert_json_post, headers=api_alert_headers)
            # NOTE(review): the response body is logged at ERROR level even on
            # success -- confirm whether info/debug was intended here.
            self.logger.error(api_alert_req.text)
            self.sent += 1
        except Exception as e:
            # Bug fix: "except Exception, e" is Python-2-only syntax and a
            # SyntaxError under Python 3; "as e" is valid on both 2.6+ and 3.x.
            self.logger.error("""%s - Alert POST failed - url: %s""" % (audio_id, api_alert_url))
            self.logger.error(e)
| {
"repo_name": "tanapop/rfcx-worker-analysis",
"path": "modules/domain_modules/alerts.py",
"copies": "1",
"size": "3083",
"license": "apache-2.0",
"hash": 5662013720085624000,
"line_mean": 44.3382352941,
"line_max": 140,
"alpha_frac": 0.5828738242,
"autogenerated": false,
"ratio": 3.825062034739454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883339073282966,
"avg_score": 0.00491935713129744,
"num_lines": 68
} |
"""alert table
Revision ID: afe08201dbc8
Revises: b46043443bf2
Create Date: 2017-04-26 18:10:54.562302
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'afe08201dbc8'        # this migration's unique id
down_revision = 'b46043443bf2'   # migration this one applies on top of
branch_labels = None             # no named branches
depends_on = None                # no cross-branch dependencies
def upgrade():
    """Create the ``Alerts`` table.

    One row per configured alert: a predicate (type plus threshold ``value``)
    tied to an entity and a series, with enablement, last-check status,
    recipient e-mail and soft-delete timestamp columns.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('Alerts',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('entity_id_fk', sa.Integer(), nullable=False),
                    sa.Column('series_id_fk', sa.Integer(), nullable=False),
                    sa.Column('alert_predicate_type', sa.Enum('data_delay', 'value_too_low', 'value_too_high',
                                                              name='alert_predicate_type'), nullable=False),
                    sa.Column('value', sa.Float(), nullable=False),
                    sa.Column('is_enabled', sa.Boolean(), nullable=False),
                    sa.Column('last_check_status', sa.Boolean(), nullable=True),
                    sa.Column('alert_recipient_email', sa.String(length=255), nullable=True),
                    sa.Column('delete_ts', sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(['entity_id_fk'], ['Entities.id'], ),
                    sa.ForeignKeyConstraint(['series_id_fk'], ['Series_Attributes.id'], ),
                    sa.PrimaryKeyConstraint('id'))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``Alerts`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('Alerts')
    # ### end Alembic commands ###
| {
"repo_name": "qiubit/luminis",
"path": "backend/alembic/versions/afe08201dbc8_alert_table.py",
"copies": "1",
"size": "1647",
"license": "mit",
"hash": 7039382028349722000,
"line_mean": 39.1707317073,
"line_max": 110,
"alpha_frac": 0.5701275046,
"autogenerated": false,
"ratio": 3.9782608695652173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5048388374165217,
"avg_score": null,
"num_lines": null
} |
"""Alert when resources are made public."""
import json
from policyuniverse.policy import Policy
from streamalert.shared.rule import rule
@rule(logs=['cloudtrail:events'])
def cloudtrail_public_resources(rec):
    """
    author: spiper
    description: Detect resources being made public.

    playbook: (a) identify the AWS account in the log
              (b) identify what resource(s) are impacted by the API call
              (c) determine if the intent is valid, malicious or accidental
    """
    event_name = rec['eventName']
    params = rec.get('requestParameters', {})

    # S3 is special-cased: the bucket policy arrives as already-parsed JSON,
    # unlike every other service below, which supplies a policy *string*.
    if event_name == 'PutBucketPolicy':
        bucket_policy = params.get('bucketPolicy', None)
        if not bucket_policy:
            return False
        if Policy(bucket_policy).is_internet_accessible():
            return True
        return False

    # Locate the policy string for the remaining services.
    policy_string = ''
    if event_name in ('CreateElasticsearchDomain', 'UpdateElasticsearchDomainConfig'):
        # ElasticSearch (create or config update)
        policy_string = params.get('accessPolicies', '')
    elif event_name == 'SetVaultAccessPolicy':
        # Glacier vaults nest the policy one level deeper
        policy_string = params.get('policy', {}).get('policy', '')
    elif event_name in ('SetQueueAttributes', 'CreateTopic'):
        # SQS queue attributes / SNS topic creation
        policy_string = params.get('attributes', {}).get('Policy', '')
    elif event_name == 'SetTopicAttributes':
        # SNS attribute update: only relevant when the Policy attribute changes
        if params.get('attributeName', '') == 'Policy':
            policy_string = params.get('attributeValue', '')
    elif event_name == 'SetRepositoryPolicy':
        # ECR
        policy_string = params.get('policyText', '')
    elif event_name in ('PutKeyPolicy', 'CreateKey'):
        # KMS (policy update or key creation)
        policy_string = params.get('policy', '')
    elif event_name == 'PutResourcePolicy':
        # SecretsManager
        policy_string = params.get('resourcePolicy', '')

    # Evaluate whichever policy string was found.
    if policy_string:
        if Policy(json.loads(policy_string)).is_internet_accessible():
            return True
    return False
| {
"repo_name": "airbnb/streamalert",
"path": "rules/community/cloudwatch_events/cloudtrail_public_resources.py",
"copies": "1",
"size": "2856",
"license": "apache-2.0",
"hash": 5220971040745723000,
"line_mean": 35.6153846154,
"line_max": 84,
"alpha_frac": 0.6022408964,
"autogenerated": false,
"ratio": 4.256333830104322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017676538033912054,
"num_lines": 78
} |
"""Alert when root AWS credentials are used."""
from streamalert.shared.rule import rule
@rule(
    logs=['cloudwatch:events'],
    req_subkeys={
        'detail': ['userIdentity', 'eventType']
    })
def cloudtrail_root_account_usage(rec):
    """
    author: airbnb_csirt
    description: Root AWS credentials are being used;
                 This is against best practice and may be an attacker
    reference_1: https://aws.amazon.com/premiumsupport/knowledge-center/
                 cloudtrail-root-action-logs/
    reference_2: http://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html
    playbook: (a) identify who is using the Root account
              (b) ping the individual to determine if intentional and/or legitimate
    """
    # reference_1 contains details on logic below
    identity = rec['detail']['userIdentity']
    if identity['type'] != 'Root':
        return False  # not the root principal at all
    if identity.get('invokedBy') is not None:
        return False  # root credentials used on the caller's behalf by a service
    # Ignore AWS-initiated service events; alert on everything else.
    return rec['detail']['eventType'] != 'AwsServiceEvent'
| {
"repo_name": "airbnb/streamalert",
"path": "rules/community/cloudwatch_events/cloudtrail_root_account_usage.py",
"copies": "1",
"size": "1052",
"license": "apache-2.0",
"hash": -7604687137036976000,
"line_mean": 42.8333333333,
"line_max": 91,
"alpha_frac": 0.6207224335,
"autogenerated": false,
"ratio": 4.015267175572519,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135989609072519,
"avg_score": null,
"num_lines": null
} |
"""Alexa capabilities."""
from datetime import datetime
import logging
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
)
import homeassistant.components.climate.const as climate
from homeassistant.components import light, fan, cover
import homeassistant.util.color as color_util
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
)
from .errors import UnsupportedProperty
# Module-level logger, named after this module's import path.
_LOGGER = logging.getLogger(__name__)
class AlexaCapibility:
    """Base class for Alexa capability interfaces.

    The Smart Home Skills API defines a number of "capability interfaces",
    roughly analogous to domains in Home Assistant. The supported interfaces
    describe what actions can be performed on a particular device.

    https://developer.amazon.com/docs/device-apis/message-guide.html
    """

    def __init__(self, entity):
        """Wrap the Home Assistant entity this capability reports on."""
        self.entity = entity

    def name(self):
        """Return the Alexa API name of this interface (subclass must supply)."""
        raise NotImplementedError

    @staticmethod
    def properties_supported():
        """List the properties this interface exposes (none by default)."""
        return []

    @staticmethod
    def properties_proactively_reported():
        """Whether property changes are pushed asynchronously (default: no)."""
        return False

    @staticmethod
    def properties_retrievable():
        """Whether Alexa may poll the properties on demand (default: no)."""
        return False

    @staticmethod
    def get_property(name):
        """Read and return a property.

        Return value should be a dict, or raise UnsupportedProperty.
        Properties can also have a timeOfSample and uncertaintyInMilliseconds,
        but returning those metadata is not yet implemented.
        """
        raise UnsupportedProperty(name)

    @staticmethod
    def supports_deactivation():
        """Applicable only to scenes."""
        return None

    def serialize_discovery(self):
        """Build this interface's entry for a Discovery API response."""
        descriptor = {
            "type": "AlexaInterface",
            "interface": self.name(),
            "version": "3",
            "properties": {
                "supported": self.properties_supported(),
                "proactivelyReported": self.properties_proactively_reported(),
                "retrievable": self.properties_retrievable(),
            },
        }
        # pylint: disable=assignment-from-none
        deactivation = self.supports_deactivation()
        if deactivation is not None:
            descriptor["supportsDeactivation"] = deactivation
        return descriptor

    def serialize_properties(self):
        """Yield each supported property, serialized for an API response."""
        for supported in self.properties_supported():
            prop_name = supported["name"]
            # pylint: disable=assignment-from-no-return
            value = self.get_property(prop_name)
            if value is None:
                continue
            yield {
                "name": prop_name,
                "namespace": self.name(),
                "value": value,
                "timeOfSample": datetime.now().strftime(DATE_FORMAT),
                "uncertaintyInMilliseconds": 0,
            }
class AlexaEndpointHealth(AlexaCapibility):
    """Implements Alexa.EndpointHealth.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
    """

    def __init__(self, hass, entity):
        """Keep a hass reference alongside the wrapped entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.EndpointHealth"

    def properties_supported(self):
        """This interface exposes a single property: connectivity."""
        return [{"name": "connectivity"}]

    def properties_proactively_reported(self):
        """Connectivity changes are not pushed asynchronously."""
        return False

    def properties_retrievable(self):
        """Alexa may poll connectivity on demand."""
        return True

    def get_property(self, name):
        """Report UNREACHABLE while the entity is unavailable, OK otherwise."""
        if name != "connectivity":
            raise UnsupportedProperty(name)
        unreachable = self.entity.state == STATE_UNAVAILABLE
        return {"value": "UNREACHABLE" if unreachable else "OK"}
class AlexaPowerController(AlexaCapibility):
    """Implements Alexa.PowerController.

    https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PowerController"

    def properties_supported(self):
        """This interface exposes a single property: powerState."""
        return [{"name": "powerState"}]

    def properties_proactively_reported(self):
        """Power state changes are pushed to Alexa."""
        return True

    def properties_retrievable(self):
        """Alexa may poll the power state on demand."""
        return True

    def get_property(self, name):
        """Translate the entity state into Alexa's ON/OFF power state."""
        if name != "powerState":
            raise UnsupportedProperty(name)
        # Climate entities use their own "off" sentinel instead of STATE_OFF.
        if self.entity.domain == climate.DOMAIN:
            off_value = climate.HVAC_MODE_OFF
        else:
            off_value = STATE_OFF
        return "OFF" if self.entity.state == off_value else "ON"
class AlexaLockController(AlexaCapibility):
    """Implements Alexa.LockController.

    https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.LockController"

    def properties_supported(self):
        """This interface exposes a single property: lockState."""
        return [{"name": "lockState"}]

    def properties_retrievable(self):
        """Alexa may poll the lock state on demand."""
        return True

    def properties_proactively_reported(self):
        """Lock state changes are pushed to Alexa."""
        return True

    def get_property(self, name):
        """Map the entity state onto LOCKED/UNLOCKED, defaulting to JAMMED."""
        if name != "lockState":
            raise UnsupportedProperty(name)
        lock_states = {STATE_LOCKED: "LOCKED", STATE_UNLOCKED: "UNLOCKED"}
        return lock_states.get(self.entity.state, "JAMMED")
class AlexaSceneController(AlexaCapibility):
    """Implements Alexa.SceneController.

    https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
    """
    def __init__(self, entity, supports_deactivation):
        """Initialize the entity.

        The given ``supports_deactivation`` flag is exposed by shadowing the
        base class's ``supports_deactivation`` static method with a
        per-instance zero-argument callable, so ``serialize_discovery``
        picks up this instance's value.
        """
        super().__init__(entity)
        self.supports_deactivation = lambda: supports_deactivation
    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.SceneController"
class AlexaBrightnessController(AlexaCapibility):
    """Implements Alexa.BrightnessController.

    https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.BrightnessController"

    def properties_supported(self):
        """This interface exposes a single property: brightness."""
        return [{"name": "brightness"}]

    def properties_proactively_reported(self):
        """Brightness changes are pushed to Alexa."""
        return True

    def properties_retrievable(self):
        """Alexa may poll the brightness on demand."""
        return True

    def get_property(self, name):
        """Convert HA's 0-255 brightness attribute into a 0-100 percentage."""
        if name != "brightness":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        if "brightness" not in attributes:
            return 0
        return round(attributes["brightness"] / 255.0 * 100)
class AlexaColorController(AlexaCapibility):
    """Implements Alexa.ColorController.

    https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ColorController"

    def properties_supported(self):
        """This interface exposes a single property: color."""
        return [{"name": "color"}]

    def properties_retrievable(self):
        """Alexa may poll the color on demand."""
        return True

    def get_property(self, name):
        """Serialize hue/saturation/brightness, rescaled to Alexa's ranges."""
        if name != "color":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        hue, saturation = attributes.get(light.ATTR_HS_COLOR, (0, 0))
        return {
            "hue": hue,
            # HA saturation is 0-100; Alexa expects 0-1.
            "saturation": saturation / 100.0,
            # HA brightness is 0-255; Alexa expects 0-1.
            "brightness": attributes.get(light.ATTR_BRIGHTNESS, 0) / 255.0,
        }
class AlexaColorTemperatureController(AlexaCapibility):
    """Implements Alexa.ColorTemperatureController.

    https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ColorTemperatureController"

    def properties_supported(self):
        """This interface exposes a single property: colorTemperatureInKelvin."""
        return [{"name": "colorTemperatureInKelvin"}]

    def properties_retrievable(self):
        """Alexa may poll the color temperature on demand."""
        return True

    def get_property(self, name):
        """Convert the mired ``color_temp`` attribute to Kelvin (0 when absent)."""
        if name != "colorTemperatureInKelvin":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        if "color_temp" not in attributes:
            return 0
        return color_util.color_temperature_mired_to_kelvin(attributes["color_temp"])
class AlexaPercentageController(AlexaCapibility):
    """Implements Alexa.PercentageController.

    https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PercentageController"

    def properties_supported(self):
        """This interface exposes a single property: percentage."""
        return [{"name": "percentage"}]

    def properties_retrievable(self):
        """Alexa may poll the percentage on demand."""
        return True

    def get_property(self, name):
        """Report fan speed or cover position as a 0-100 percentage."""
        if name != "percentage":
            raise UnsupportedProperty(name)
        domain = self.entity.domain
        if domain == fan.DOMAIN:
            # Named fan speeds are translated through a fixed lookup table.
            speed = self.entity.attributes.get(fan.ATTR_SPEED)
            return PERCENTAGE_FAN_MAP.get(speed, 0)
        if domain == cover.DOMAIN:
            return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
        return 0
class AlexaSpeaker(AlexaCapibility):
    """Implements Alexa.Speaker.

    Declares no properties of its own; all other behavior comes from the
    AlexaCapibility defaults, so this class only announces the interface name.

    https://developer.amazon.com/docs/device-apis/alexa-speaker.html
    """
    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.Speaker"
class AlexaStepSpeaker(AlexaCapibility):
    """Implements Alexa.StepSpeaker.

    Declares no properties of its own; all other behavior comes from the
    AlexaCapibility defaults, so this class only announces the interface name.

    https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
    """
    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.StepSpeaker"
class AlexaPlaybackController(AlexaCapibility):
    """Implements Alexa.PlaybackController.

    Declares no properties of its own; all other behavior comes from the
    AlexaCapibility defaults, so this class only announces the interface name.

    https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
    """
    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PlaybackController"
class AlexaInputController(AlexaCapibility):
    """Implements Alexa.InputController.

    Declares no properties of its own; all other behavior comes from the
    AlexaCapibility defaults, so this class only announces the interface name.

    https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
    """
    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.InputController"
class AlexaTemperatureSensor(AlexaCapibility):
    """Implements Alexa.TemperatureSensor.

    https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
    """

    def __init__(self, hass, entity):
        """Keep a hass reference alongside the wrapped entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.TemperatureSensor"

    def properties_supported(self):
        """This interface exposes a single property: temperature."""
        return [{"name": "temperature"}]

    def properties_proactively_reported(self):
        """Temperature changes are pushed to Alexa."""
        return True

    def properties_retrievable(self):
        """Alexa may poll the temperature on demand."""
        return True

    def get_property(self, name):
        """Report the measured temperature along with its unit scale."""
        if name != "temperature":
            raise UnsupportedProperty(name)
        if self.entity.domain == climate.DOMAIN:
            # Thermostats expose the current reading as an attribute, in the
            # hass-configured temperature unit.
            unit = self.hass.config.units.temperature_unit
            temp = self.entity.attributes.get(climate.ATTR_CURRENT_TEMPERATURE)
        else:
            unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
            temp = self.entity.state
        return {"value": float(temp), "scale": API_TEMP_UNITS[unit]}
class AlexaContactSensor(AlexaCapibility):
    """Implements Alexa.ContactSensor.

    The Alexa.ContactSensor interface describes the properties and events used
    to report the state of an endpoint that detects contact between two
    surfaces. For example, a contact sensor can report whether a door or window
    is open.

    https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
    """

    def __init__(self, hass, entity):
        """Keep a hass reference alongside the wrapped entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ContactSensor"

    def properties_supported(self):
        """This interface exposes a single property: detectionState."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Detection changes are pushed to Alexa."""
        return True

    def properties_retrievable(self):
        """Alexa may poll the detection state on demand."""
        return True

    def get_property(self, name):
        """DETECTED while the sensor is on, NOT_DETECTED otherwise."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaMotionSensor(AlexaCapibility):
    """Implements Alexa.MotionSensor.

    https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
    """

    def __init__(self, hass, entity):
        """Keep a hass reference alongside the wrapped entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.MotionSensor"

    def properties_supported(self):
        """This interface exposes a single property: detectionState."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Detection changes are pushed to Alexa."""
        return True

    def properties_retrievable(self):
        """Alexa may poll the detection state on demand."""
        return True

    def get_property(self, name):
        """DETECTED while the sensor is on, NOT_DETECTED otherwise."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaThermostatController(AlexaCapibility):
    """Implements Alexa.ThermostatController.

    https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
    """

    def __init__(self, hass, entity):
        """Keep a hass reference alongside the wrapped entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ThermostatController"

    def properties_supported(self):
        """List thermostatMode plus whichever setpoints the device supports."""
        supported_features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        names = ["thermostatMode"]
        if supported_features & climate.SUPPORT_TARGET_TEMPERATURE:
            names.append("targetSetpoint")
        if supported_features & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
            names.extend(["lowerSetpoint", "upperSetpoint"])
        return [{"name": prop} for prop in names]

    def properties_proactively_reported(self):
        """Thermostat changes are pushed to Alexa."""
        return True

    def properties_retrievable(self):
        """Alexa may poll the thermostat properties on demand."""
        return True

    def get_property(self, name):
        """Read and return a property (mode or one of the setpoints)."""
        if name == "thermostatMode":
            return self._thermostat_mode()
        setpoint_attrs = {
            "targetSetpoint": ATTR_TEMPERATURE,
            "lowerSetpoint": climate.ATTR_TARGET_TEMP_LOW,
            "upperSetpoint": climate.ATTR_TARGET_TEMP_HIGH,
        }
        if name not in setpoint_attrs:
            raise UnsupportedProperty(name)
        temp = self.entity.attributes.get(setpoint_attrs[name])
        if temp is None:
            return None
        unit = self.hass.config.units.temperature_unit
        return {"value": float(temp), "scale": API_TEMP_UNITS[unit]}

    def _thermostat_mode(self):
        """Map the active preset (preferred) or HVAC state to an Alexa mode."""
        preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)
        if preset in API_THERMOSTAT_PRESETS:
            mode = API_THERMOSTAT_PRESETS[preset]
        else:
            mode = API_THERMOSTAT_MODES.get(self.entity.state)
        if mode is None:
            _LOGGER.error(
                "%s (%s) has unsupported state value '%s'",
                self.entity.entity_id,
                type(self.entity),
                self.entity.state,
            )
            raise UnsupportedProperty("thermostatMode")
        return mode
| {
"repo_name": "fbradyirl/home-assistant",
"path": "homeassistant/components/alexa/capabilities.py",
"copies": "1",
"size": "18509",
"license": "apache-2.0",
"hash": -8064290734625911000,
"line_mean": 30.1599326599,
"line_max": 127,
"alpha_frac": 0.6309363013,
"autogenerated": false,
"ratio": 4.410054801048368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5540991102348367,
"avg_score": null,
"num_lines": null
} |
"""Alexa capabilities."""
import logging
from typing import List, Optional
from homeassistant.components import (
cover,
fan,
image_processing,
input_number,
light,
timer,
vacuum,
)
from homeassistant.components.alarm_control_panel import ATTR_CODE_FORMAT, FORMAT_NUMBER
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
import homeassistant.components.climate.const as climate
import homeassistant.components.media_player.const as media_player
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
from homeassistant.core import State
import homeassistant.util.color as color_util
import homeassistant.util.dt as dt_util
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
Inputs,
)
from .errors import UnsupportedProperty
from .resources import (
AlexaCapabilityResource,
AlexaGlobalCatalog,
AlexaModeResource,
AlexaPresetResource,
AlexaSemantics,
)
# Module-level logger, named after this module's import path.
_LOGGER = logging.getLogger(__name__)
class AlexaCapability:
    """Base class for Alexa capability interfaces.

    The Smart Home Skills API defines a number of "capability interfaces",
    roughly analogous to domains in Home Assistant. The supported interfaces
    describe what actions can be performed on a particular device.

    https://developer.amazon.com/docs/device-apis/message-guide.html
    """

    # Locales in which Amazon supports this interface; subclasses override.
    supported_locales = {"en-US"}

    def __init__(self, entity: State, instance: Optional[str] = None):
        """Initialize an Alexa capability."""
        # Home Assistant state object this capability reports on.
        self.entity = entity
        # Optional instance identifier, used by multi-instance interfaces
        # such as ModeController / RangeController / ToggleController.
        self.instance = instance

    def name(self) -> str:
        """Return the Alexa API name of this interface."""
        raise NotImplementedError

    @staticmethod
    def properties_supported() -> List[dict]:
        """Return what properties this entity supports."""
        return []

    @staticmethod
    def properties_proactively_reported() -> bool:
        """Return True if properties asynchronously reported."""
        return False

    @staticmethod
    def properties_retrievable() -> bool:
        """Return True if properties can be retrieved."""
        return False

    @staticmethod
    def properties_non_controllable() -> bool:
        """Return True if non controllable.

        Returns None (not False) by default so serialize_discovery()
        omits the nonControllable flag entirely.
        """
        return None

    @staticmethod
    def get_property(name):
        """Read and return a property.

        Return value should be a dict, or raise UnsupportedProperty.

        Properties can also have a timeOfSample and uncertaintyInMilliseconds,
        but returning those metadata is not yet implemented.
        """
        raise UnsupportedProperty(name)

    @staticmethod
    def supports_deactivation():
        """Applicable only to scenes."""
        return None

    @staticmethod
    def capability_proactively_reported():
        """Return True if the capability is proactively reported.

        Set properties_proactively_reported() for proactively reported properties.
        Applicable to DoorbellEventSource.
        """
        return None

    @staticmethod
    def capability_resources():
        """Return the capability object.

        Applicable to ToggleController, RangeController, and ModeController interfaces.
        """
        return []

    @staticmethod
    def configuration():
        """Return the configuration object.

        Applicable to the ThermostatController, SecurityControlPanel, ModeController,
        RangeController, and EventDetectionSensor.
        """
        return []

    @staticmethod
    def configurations():
        """Return the configurations object.

        The plural configurations object is different than the singular
        configuration object. Applicable to EqualizerController interface.
        """
        return []

    @staticmethod
    def inputs():
        """Applicable only to media players."""
        return []

    @staticmethod
    def semantics():
        """Return the semantics object.

        Applicable to ToggleController, RangeController, and ModeController interfaces.
        """
        return []

    @staticmethod
    def supported_operations():
        """Return the supportedOperations object."""
        return []

    @staticmethod
    def camera_stream_configurations():
        """Applicable only to CameraStreamController."""
        return None

    def serialize_discovery(self):
        """Serialize according to the Discovery API.

        Builds the AlexaInterface JSON object advertised for this capability,
        adding each optional section only when the subclass provides it.
        """
        result = {"type": "AlexaInterface", "interface": self.name(), "version": "3"}
        instance = self.instance
        if instance is not None:
            result["instance"] = instance
        properties_supported = self.properties_supported()
        if properties_supported:
            result["properties"] = {
                "supported": self.properties_supported(),
                "proactivelyReported": self.properties_proactively_reported(),
                "retrievable": self.properties_retrievable(),
            }
        proactively_reported = self.capability_proactively_reported()
        if proactively_reported is not None:
            result["proactivelyReported"] = proactively_reported
        non_controllable = self.properties_non_controllable()
        if non_controllable is not None:
            # NOTE(review): assumes any capability that sets nonControllable
            # also returns a non-empty properties_supported(); otherwise
            # result["properties"] does not exist here — confirm for new
            # subclasses.
            result["properties"]["nonControllable"] = non_controllable
        supports_deactivation = self.supports_deactivation()
        if supports_deactivation is not None:
            result["supportsDeactivation"] = supports_deactivation
        # capability_resources() runs before configuration(); ModeController
        # and RangeController rely on this ordering to cache their resource.
        capability_resources = self.capability_resources()
        if capability_resources:
            result["capabilityResources"] = capability_resources
        configuration = self.configuration()
        if configuration:
            result["configuration"] = configuration
        # The plural configurations object is different than the singular configuration object above.
        configurations = self.configurations()
        if configurations:
            result["configurations"] = configurations
        semantics = self.semantics()
        if semantics:
            result["semantics"] = semantics
        supported_operations = self.supported_operations()
        if supported_operations:
            result["supportedOperations"] = supported_operations
        inputs = self.inputs()
        if inputs:
            result["inputs"] = inputs
        camera_stream_configurations = self.camera_stream_configurations()
        if camera_stream_configurations:
            result["cameraStreamConfigurations"] = camera_stream_configurations
        return result

    def serialize_properties(self):
        """Return properties serialized for an API response.

        Generator yielding one dict per supported property. Unexpected
        read errors are logged and the property skipped (as are None
        values); UnsupportedProperty is re-raised to the caller.
        """
        for prop in self.properties_supported():
            prop_name = prop["name"]
            try:
                prop_value = self.get_property(prop_name)
            except UnsupportedProperty:
                raise
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception(
                    "Unexpected error getting %s.%s property from %s",
                    self.name(),
                    prop_name,
                    self.entity,
                )
                prop_value = None
            if prop_value is None:
                continue
            result = {
                "name": prop_name,
                "namespace": self.name(),
                "value": prop_value,
                "timeOfSample": dt_util.utcnow().strftime(DATE_FORMAT),
                "uncertaintyInMilliseconds": 0,
            }
            instance = self.instance
            if instance is not None:
                result["instance"] = instance
            yield result
class Alexa(AlexaCapability):
    """Implements Alexa Interface.

    Although endpoints implement this interface implicitly,
    the API suggests you should explicitly include this interface.

    https://developer.amazon.com/docs/device-apis/alexa-interface.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "es-MX",
        "fr-CA",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa"
class AlexaEndpointHealth(AlexaCapability):
    """Implements Alexa.EndpointHealth.

    Reports endpoint connectivity from the entity's availability.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.EndpointHealth"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "connectivity"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "connectivity":
            raise UnsupportedProperty(name)
        is_unreachable = self.entity.state == STATE_UNAVAILABLE
        return {"value": "UNREACHABLE" if is_unreachable else "OK"}
class AlexaPowerController(AlexaCapability):
    """Implements Alexa.PowerController.

    https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PowerController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "powerState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "powerState":
            raise UnsupportedProperty(name)
        state = self.entity.state
        domain = self.entity.domain
        # Each domain has its own notion of "on"; default is any non-OFF state.
        if domain == climate.DOMAIN:
            is_on = state != climate.HVAC_MODE_OFF
        elif domain == vacuum.DOMAIN:
            is_on = state == vacuum.STATE_CLEANING
        elif domain == timer.DOMAIN:
            is_on = state != STATE_IDLE
        else:
            is_on = state != STATE_OFF
        return "ON" if is_on else "OFF"
class AlexaLockController(AlexaCapability):
    """Implements Alexa.LockController.

    https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-US",
        "es-ES",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.LockController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "lockState"}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "lockState":
            raise UnsupportedProperty(name)
        # Any state other than locked/unlocked is reported as JAMMED.
        state_map = {STATE_LOCKED: "LOCKED", STATE_UNLOCKED: "UNLOCKED"}
        return state_map.get(self.entity.state, "JAMMED")
class AlexaSceneController(AlexaCapability):
    """Implements Alexa.SceneController.

    https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
    }

    def __init__(self, entity, supports_deactivation):
        """Initialize the entity."""
        super().__init__(entity)
        # Shadow the base-class static method with an instance-level callable
        # so serialize_discovery() reports this endpoint's deactivation support.
        deactivation_flag = supports_deactivation
        self.supports_deactivation = lambda: deactivation_flag

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.SceneController"
class AlexaBrightnessController(AlexaCapability):
    """Implements Alexa.BrightnessController.

    https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.BrightnessController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "brightness"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "brightness":
            raise UnsupportedProperty(name)
        attrs = self.entity.attributes
        if "brightness" not in attrs:
            return 0
        # Convert Home Assistant's 0-255 brightness to Alexa's 0-100 scale.
        return round(attrs["brightness"] / 255.0 * 100)
class AlexaColorController(AlexaCapability):
    """Implements Alexa.ColorController.

    https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ColorController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "color"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "color":
            raise UnsupportedProperty(name)
        attrs = self.entity.attributes
        hue, saturation = attrs.get(light.ATTR_HS_COLOR, (0, 0))
        brightness = attrs.get(light.ATTR_BRIGHTNESS, 0)
        # Alexa expects saturation and brightness on a 0.0-1.0 scale.
        return {
            "hue": hue,
            "saturation": saturation / 100.0,
            "brightness": brightness / 255.0,
        }
class AlexaColorTemperatureController(AlexaCapability):
    """Implements Alexa.ColorTemperatureController.

    https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ColorTemperatureController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "colorTemperatureInKelvin"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "colorTemperatureInKelvin":
            raise UnsupportedProperty(name)
        attrs = self.entity.attributes
        if "color_temp" not in attrs:
            return None
        # Home Assistant stores color temperature in mireds; Alexa wants kelvin.
        return color_util.color_temperature_mired_to_kelvin(attrs["color_temp"])
class AlexaPercentageController(AlexaCapability):
    """Implements Alexa.PercentageController.

    https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PercentageController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "percentage"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "percentage":
            raise UnsupportedProperty(name)
        domain = self.entity.domain
        if domain == fan.DOMAIN:
            # Named fan speeds map to fixed percentages; unknown speeds report 0.
            current_speed = self.entity.attributes.get(fan.ATTR_SPEED)
            return PERCENTAGE_FAN_MAP.get(current_speed, 0)
        if domain == cover.DOMAIN:
            return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
        return 0
class AlexaSpeaker(AlexaCapability):
    """Implements Alexa.Speaker.

    Reports absolute volume level and, when the media player supports
    muting, the mute state.

    https://developer.amazon.com/docs/device-apis/alexa-speaker.html
    """

    supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.Speaker"

    def properties_supported(self):
        """Return what properties this entity supports."""
        properties = [{"name": "volume"}]
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & media_player.SUPPORT_VOLUME_MUTE:
            properties.append({"name": "muted"})
        return properties

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Returns None for "volume" when the level is unknown so the property
        is skipped in the report. Raises UnsupportedProperty for any other
        property name, per the AlexaCapability.get_property contract.
        """
        if name == "volume":
            current_level = self.entity.attributes.get(
                media_player.ATTR_MEDIA_VOLUME_LEVEL
            )
            if current_level is None:
                return None
            # Convert Home Assistant's 0.0-1.0 volume to Alexa's 0-100 scale.
            return round(float(current_level) * 100)
        if name == "muted":
            return bool(
                self.entity.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
            )
        # Previously unknown names fell through to `return None`, silently
        # violating the base-class contract; raise like the sibling interfaces.
        raise UnsupportedProperty(name)
class AlexaStepSpeaker(AlexaCapability):
    """Implements Alexa.StepSpeaker.

    Exposes no properties of its own; only the interface itself is
    advertised during discovery.

    https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
    """

    supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.StepSpeaker"
class AlexaPlaybackController(AlexaCapability):
    """Implements Alexa.PlaybackController.

    https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
    """

    supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US", "fr-FR"}

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PlaybackController"

    def supported_operations(self):
        """Return the supportedOperations object.

        Supported Operations: FastForward, Next, Pause, Play, Previous, Rewind, StartOver, Stop
        """
        supported_features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        feature_to_operation = {
            media_player.SUPPORT_NEXT_TRACK: "Next",
            media_player.SUPPORT_PAUSE: "Pause",
            media_player.SUPPORT_PLAY: "Play",
            media_player.SUPPORT_PREVIOUS_TRACK: "Previous",
            media_player.SUPPORT_STOP: "Stop",
        }
        supported_operations = []
        for feature_bit, operation in feature_to_operation.items():
            if feature_bit & supported_features:
                supported_operations.append(operation)
        return supported_operations
class AlexaInputController(AlexaCapability):
    """Implements Alexa.InputController.

    https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
    """

    supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.InputController"

    def inputs(self):
        """Return the list of valid supported inputs."""
        source_list = self.entity.attributes.get(
            media_player.ATTR_INPUT_SOURCE_LIST, []
        )
        return AlexaInputController.get_valid_inputs(source_list)

    @staticmethod
    def get_valid_inputs(source_list):
        """Return list of supported inputs."""
        valid_inputs = []
        for source in source_list:
            # Normalize the source name (lowercase, strip separators) before
            # matching against the Alexa-supported input names.
            normalized = source.lower()
            for separator in ("-", "_", " "):
                normalized = normalized.replace(separator, "")
            alexa_name = Inputs.VALID_SOURCE_NAME_MAP.get(normalized)
            if alexa_name is not None:
                valid_inputs.append({"name": alexa_name})
        return valid_inputs
class AlexaTemperatureSensor(AlexaCapability):
    """Implements Alexa.TemperatureSensor.

    https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.TemperatureSensor"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "temperature"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Returns None (skipping the property) for unavailable, unknown,
        or non-numeric readings.
        """
        if name != "temperature":
            raise UnsupportedProperty(name)
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        temp = self.entity.state
        if self.entity.domain == climate.DOMAIN:
            # Climate entities expose the measured temperature as an
            # attribute, in Home Assistant's configured unit.
            unit = self.hass.config.units.temperature_unit
            temp = self.entity.attributes.get(climate.ATTR_CURRENT_TEMPERATURE)
        if temp in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
            return None
        try:
            temp = float(temp)
        except (ValueError, TypeError):
            # Also catch TypeError: a non-numeric, non-string attribute value
            # previously escaped this handler and aborted the state report.
            _LOGGER.warning("Invalid temp value %s for %s", temp, self.entity.entity_id)
            return None
        return {"value": temp, "scale": API_TEMP_UNITS[unit]}
class AlexaContactSensor(AlexaCapability):
    """Implements Alexa.ContactSensor.

    The Alexa.ContactSensor interface describes the properties and events used
    to report the state of an endpoint that detects contact between two
    surfaces. For example, a contact sensor can report whether a door or window
    is open.

    https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
    """

    supported_locales = {"en-CA", "en-US", "it-IT"}

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ContactSensor"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaMotionSensor(AlexaCapability):
    """Implements Alexa.MotionSensor.

    https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
    """

    supported_locales = {"en-CA", "en-US", "it-IT"}

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.MotionSensor"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaThermostatController(AlexaCapability):
    """Implements Alexa.ThermostatController.

    Reports thermostat mode and target setpoint(s), and advertises the
    modes the climate entity supports.

    https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ThermostatController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        properties = [{"name": "thermostatMode"}]
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & climate.SUPPORT_TARGET_TEMPERATURE:
            properties.append({"name": "targetSetpoint"})
        if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
            properties.append({"name": "lowerSetpoint"})
            properties.append({"name": "upperSetpoint"})
        return properties

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Returns None while the entity is unavailable so Alexa.EndpointHealth
        reports the connectivity issue instead. Raises UnsupportedProperty
        for unknown names or unmappable thermostat modes.
        """
        if self.entity.state == STATE_UNAVAILABLE:
            return None
        if name == "thermostatMode":
            # An active preset (e.g. "eco") takes precedence over the HVAC state.
            preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)
            if preset in API_THERMOSTAT_PRESETS:
                mode = API_THERMOSTAT_PRESETS[preset]
            else:
                mode = API_THERMOSTAT_MODES.get(self.entity.state)
                if mode is None:
                    _LOGGER.error(
                        "%s (%s) has unsupported state value '%s'",
                        self.entity.entity_id,
                        type(self.entity),
                        self.entity.state,
                    )
                    raise UnsupportedProperty(name)
            return mode
        unit = self.hass.config.units.temperature_unit
        if name == "targetSetpoint":
            temp = self.entity.attributes.get(ATTR_TEMPERATURE)
        elif name == "lowerSetpoint":
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_LOW)
        elif name == "upperSetpoint":
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_HIGH)
        else:
            raise UnsupportedProperty(name)
        if temp is None:
            return None
        try:
            temp = float(temp)
        except (ValueError, TypeError):
            # Also catch TypeError: a non-numeric, non-string attribute value
            # previously escaped this handler and aborted the state report.
            _LOGGER.warning(
                "Invalid temp value %s for %s in %s", temp, name, self.entity.entity_id
            )
            return None
        return {"value": temp, "scale": API_TEMP_UNITS[unit]}

    def configuration(self):
        """Return configuration object.

        Translates climate HVAC_MODES and PRESETS to supported Alexa
        ThermostatMode Values. ThermostatMode Value must be AUTO, COOL,
        HEAT, ECO, OFF, or CUSTOM.
        """
        supported_modes = []
        # Default to an empty list: an entity missing the hvac_modes
        # attribute previously raised TypeError when iterating None.
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        for mode in hvac_modes:
            thermostat_mode = API_THERMOSTAT_MODES.get(mode)
            if thermostat_mode:
                supported_modes.append(thermostat_mode)
        preset_modes = self.entity.attributes.get(climate.ATTR_PRESET_MODES)
        if preset_modes:
            for mode in preset_modes:
                thermostat_mode = API_THERMOSTAT_PRESETS.get(mode)
                if thermostat_mode:
                    supported_modes.append(thermostat_mode)
        # Return False for supportsScheduling until supported with event
        # listener in handler.
        configuration = {"supportsScheduling": False}
        if supported_modes:
            configuration["supportedModes"] = supported_modes
        return configuration
class AlexaPowerLevelController(AlexaCapability):
    """Implements Alexa.PowerLevelController.

    https://developer.amazon.com/docs/device-apis/alexa-powerlevelcontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PowerLevelController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "powerLevel"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "powerLevel":
            raise UnsupportedProperty(name)
        if self.entity.domain != fan.DOMAIN:
            return None
        # Named fan speeds map to fixed percentage levels.
        current_speed = self.entity.attributes.get(fan.ATTR_SPEED)
        return PERCENTAGE_FAN_MAP.get(current_speed)
class AlexaSecurityPanelController(AlexaCapability):
    """Implements Alexa.SecurityPanelController.

    https://developer.amazon.com/docs/device-apis/alexa-securitypanelcontroller.html
    """

    # NOTE(review): "pt_BR" uses an underscore while every other entry uses a
    # hyphen — confirm against Amazon's locale list before changing.
    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "es-MX",
        "es-US",
        "fr-CA",
        "fr-FR",
        "it-IT",
        "ja-JP",
        "pt_BR",
    }

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.SecurityPanelController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "armState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Any state that is not an armed state reports as DISARMED.
        """
        if name != "armState":
            raise UnsupportedProperty(name)
        arm_state = self.entity.state
        if arm_state == STATE_ALARM_ARMED_HOME:
            return "ARMED_STAY"
        if arm_state == STATE_ALARM_ARMED_AWAY:
            return "ARMED_AWAY"
        if arm_state == STATE_ALARM_ARMED_NIGHT:
            return "ARMED_NIGHT"
        if arm_state == STATE_ALARM_ARMED_CUSTOM_BYPASS:
            # Alexa has no custom-bypass concept; report as armed-stay.
            return "ARMED_STAY"
        return "DISARMED"

    def configuration(self):
        """Return configuration object with supported authorization types."""
        code_format = self.entity.attributes.get(ATTR_CODE_FORMAT)
        # Use .get() with a default like the rest of this module; a panel
        # missing the supported_features attribute previously raised KeyError.
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        configuration = {}
        supported_arm_states = [{"value": "DISARMED"}]
        if supported & SUPPORT_ALARM_ARM_AWAY:
            supported_arm_states.append({"value": "ARMED_AWAY"})
        if supported & SUPPORT_ALARM_ARM_HOME:
            supported_arm_states.append({"value": "ARMED_STAY"})
        if supported & SUPPORT_ALARM_ARM_NIGHT:
            supported_arm_states.append({"value": "ARMED_NIGHT"})
        configuration["supportedArmStates"] = supported_arm_states
        if code_format == FORMAT_NUMBER:
            configuration["supportedAuthorizationTypes"] = [{"type": "FOUR_DIGIT_PIN"}]
        return configuration
class AlexaModeController(AlexaCapability):
    """Implements Alexa.ModeController.

    The instance property must be unique across ModeController, RangeController, ToggleController within the same device.
    The instance property should be a concatenated string of device domain period and single word.
    e.g. fan.speed & fan.direction.

    The instance property must not contain words from other instance property strings within the same device.
    e.g. Instance property cover.position & cover.tilt_position will cause the Alexa.Discovery directive to fail.

    An instance property string value may be reused for different devices.

    https://developer.amazon.com/docs/device-apis/alexa-modecontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "es-MX",
        "fr-CA",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def __init__(self, entity, instance, non_controllable=False):
        """Initialize the entity."""
        super().__init__(entity, instance)
        # Resource/semantics caches: populated by capability_resources() /
        # semantics() and read back by configuration(). serialize_discovery()
        # calls capability_resources() before configuration(), so the cache
        # is filled by the time configuration() needs it.
        self._resource = None
        self._semantics = None
        # Shadow the base-class static method with a per-instance callable.
        self.properties_non_controllable = lambda: non_controllable

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ModeController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "mode"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Returns the current mode as "<attribute>.<value>", or None when the
        instance is unrecognized or the value is outside the known set.
        """
        if name != "mode":
            raise UnsupportedProperty(name)
        # Fan Direction
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
            mode = self.entity.attributes.get(fan.ATTR_DIRECTION, None)
            if mode in (fan.DIRECTION_FORWARD, fan.DIRECTION_REVERSE, STATE_UNKNOWN):
                return f"{fan.ATTR_DIRECTION}.{mode}"
        # Cover Position
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            # Return state instead of position when using ModeController.
            mode = self.entity.state
            if mode in (
                cover.STATE_OPEN,
                cover.STATE_OPENING,
                cover.STATE_CLOSED,
                cover.STATE_CLOSING,
                STATE_UNKNOWN,
            ):
                return f"{cover.ATTR_POSITION}.{mode}"
        return None

    def configuration(self):
        """Return configuration with modeResources.

        Relies on capability_resources() having cached self._resource first
        (serialize_discovery() guarantees this ordering).
        """
        if isinstance(self._resource, AlexaCapabilityResource):
            return self._resource.serialize_configuration()
        return None

    def capability_resources(self):
        """Return capabilityResources object."""
        # Fan Direction Resource
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
            self._resource = AlexaModeResource(
                [AlexaGlobalCatalog.SETTING_DIRECTION], False
            )
            self._resource.add_mode(
                f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_FORWARD}", [fan.DIRECTION_FORWARD]
            )
            self._resource.add_mode(
                f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_REVERSE}", [fan.DIRECTION_REVERSE]
            )
            return self._resource.serialize_capability_resources()
        # Cover Position Resources
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            self._resource = AlexaModeResource(
                ["Position", AlexaGlobalCatalog.SETTING_OPENING], False
            )
            self._resource.add_mode(
                f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}",
                [AlexaGlobalCatalog.VALUE_OPEN],
            )
            self._resource.add_mode(
                f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}",
                [AlexaGlobalCatalog.VALUE_CLOSE],
            )
            self._resource.add_mode(
                f"{cover.ATTR_POSITION}.custom",
                ["Custom", AlexaGlobalCatalog.SETTING_PRESET],
            )
            return self._resource.serialize_capability_resources()
        return None

    def semantics(self):
        """Build and return semantics object.

        Maps Alexa raise/lower (and open/close, when tilt is not supported)
        utterances onto SetMode directives for cover position.
        """
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Cover Position
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            lower_labels = [AlexaSemantics.ACTION_LOWER]
            raise_labels = [AlexaSemantics.ACTION_RAISE]
            self._semantics = AlexaSemantics()
            # Add open/close semantics if tilt is not supported.
            if not supported & cover.SUPPORT_SET_TILT_POSITION:
                lower_labels.append(AlexaSemantics.ACTION_CLOSE)
                raise_labels.append(AlexaSemantics.ACTION_OPEN)
                self._semantics.add_states_to_value(
                    [AlexaSemantics.STATES_CLOSED],
                    f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}",
                )
                self._semantics.add_states_to_value(
                    [AlexaSemantics.STATES_OPEN],
                    f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}",
                )
            self._semantics.add_action_to_directive(
                lower_labels,
                "SetMode",
                {"mode": f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}"},
            )
            self._semantics.add_action_to_directive(
                raise_labels,
                "SetMode",
                {"mode": f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}"},
            )
            return self._semantics.serialize_semantics()
        return None
class AlexaRangeController(AlexaCapability):
"""Implements Alexa.RangeController.
The instance property must be unique across ModeController, RangeController, ToggleController within the same device.
The instance property should be a concatenated string of device domain period and single word.
e.g. fan.speed & fan.direction.
The instance property must not contain words from other instance property strings within the same device.
e.g. Instance property cover.position & cover.tilt_position will cause the Alexa.Discovery directive to fail.
An instance property string value may be reused for different devices.
https://developer.amazon.com/docs/device-apis/alexa-rangecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.RangeController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "rangeValue"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "rangeValue":
raise UnsupportedProperty(name)
# Return None for unavailable and unknown states.
# Allows the Alexa.EndpointHealth Interface to handle the unavailable state in a stateReport.
if self.entity.state in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
return None
# Fan Speed
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
speed_list = self.entity.attributes.get(fan.ATTR_SPEED_LIST)
speed = self.entity.attributes.get(fan.ATTR_SPEED)
if speed_list is not None and speed is not None:
speed_index = next(
(i for i, v in enumerate(speed_list) if v == speed), None
)
return speed_index
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION)
# Cover Tilt
if self.instance == f"{cover.DOMAIN}.tilt":
return self.entity.attributes.get(cover.ATTR_CURRENT_TILT_POSITION)
# Input Number Value
if self.instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
return float(self.entity.state)
# Vacuum Fan Speed
if self.instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
speed_list = self.entity.attributes.get(vacuum.ATTR_FAN_SPEED_LIST)
speed = self.entity.attributes.get(vacuum.ATTR_FAN_SPEED)
if speed_list is not None and speed is not None:
speed_index = next(
(i for i, v in enumerate(speed_list) if v == speed), None
)
return speed_index
return None
def configuration(self):
"""Return configuration with presetResources."""
if isinstance(self._resource, AlexaCapabilityResource):
return self._resource.serialize_configuration()
return None
    def capability_resources(self):
        """Return capabilityResources object.

        Builds an AlexaPresetResource for the configured instance
        (fan speed, cover position/tilt, input_number value, or vacuum
        fan speed) and returns its serialized friendlyNames, or None
        when the instance is not recognized.
        """
        # Fan Speed Resources
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
            speed_list = self.entity.attributes[fan.ATTR_SPEED_LIST]
            # Presets map list indices onto the 0..max_value range.
            max_value = len(speed_list) - 1
            self._resource = AlexaPresetResource(
                labels=[AlexaGlobalCatalog.SETTING_FAN_SPEED],
                min_value=0,
                max_value=max_value,
                precision=1,
            )
            for index, speed in enumerate(speed_list):
                labels = []
                # Only string speed names become friendly labels.
                if isinstance(speed, str):
                    labels.append(speed.replace("_", " "))
                # Index 1 (not 0) is labeled minimum; index 0 is typically "off".
                if index == 1:
                    labels.append(AlexaGlobalCatalog.VALUE_MINIMUM)
                if index == max_value:
                    labels.append(AlexaGlobalCatalog.VALUE_MAXIMUM)
                if len(labels) > 0:
                    self._resource.add_preset(value=index, labels=labels)
            return self._resource.serialize_capability_resources()
        # Cover Position Resources
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            self._resource = AlexaPresetResource(
                ["Position", AlexaGlobalCatalog.SETTING_OPENING],
                min_value=0,
                max_value=100,
                precision=1,
                unit=AlexaGlobalCatalog.UNIT_PERCENT,
            )
            return self._resource.serialize_capability_resources()
        # Cover Tilt Resources
        if self.instance == f"{cover.DOMAIN}.tilt":
            self._resource = AlexaPresetResource(
                ["Tilt", "Angle", AlexaGlobalCatalog.SETTING_DIRECTION],
                min_value=0,
                max_value=100,
                precision=1,
                unit=AlexaGlobalCatalog.UNIT_PERCENT,
            )
            return self._resource.serialize_capability_resources()
        # Input Number Value: range and precision come from the entity itself.
        if self.instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
            min_value = float(self.entity.attributes[input_number.ATTR_MIN])
            max_value = float(self.entity.attributes[input_number.ATTR_MAX])
            precision = float(self.entity.attributes.get(input_number.ATTR_STEP, 1))
            unit = self.entity.attributes.get(input_number.ATTR_UNIT_OF_MEASUREMENT)
            self._resource = AlexaPresetResource(
                ["Value", AlexaGlobalCatalog.SETTING_PRESET],
                min_value=min_value,
                max_value=max_value,
                precision=precision,
                unit=unit,
            )
            self._resource.add_preset(
                value=min_value, labels=[AlexaGlobalCatalog.VALUE_MINIMUM]
            )
            self._resource.add_preset(
                value=max_value, labels=[AlexaGlobalCatalog.VALUE_MAXIMUM]
            )
            return self._resource.serialize_capability_resources()
        # Vacuum Fan Speed Resources
        if self.instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
            speed_list = self.entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
            max_value = len(speed_list) - 1
            self._resource = AlexaPresetResource(
                labels=[AlexaGlobalCatalog.SETTING_FAN_SPEED],
                min_value=0,
                max_value=max_value,
                precision=1,
            )
            for index, speed in enumerate(speed_list):
                # NOTE(review): unlike the fan branch, this assumes every
                # entry is a string — confirm vacuum speed lists are str-only.
                labels = [speed.replace("_", " ")]
                if index == 1:
                    labels.append(AlexaGlobalCatalog.VALUE_MINIMUM)
                if index == max_value:
                    labels.append(AlexaGlobalCatalog.VALUE_MAXIMUM)
                self._resource.add_preset(value=index, labels=labels)
            return self._resource.serialize_capability_resources()
        return None
def semantics(self):
"""Build and return semantics object."""
supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
lower_labels = [AlexaSemantics.ACTION_LOWER]
raise_labels = [AlexaSemantics.ACTION_RAISE]
self._semantics = AlexaSemantics()
# Add open/close semantics if tilt is not supported.
if not supported & cover.SUPPORT_SET_TILT_POSITION:
lower_labels.append(AlexaSemantics.ACTION_CLOSE)
raise_labels.append(AlexaSemantics.ACTION_OPEN)
self._semantics.add_states_to_value(
[AlexaSemantics.STATES_CLOSED], value=0
)
self._semantics.add_states_to_range(
[AlexaSemantics.STATES_OPEN], min_value=1, max_value=100
)
self._semantics.add_action_to_directive(
lower_labels, "SetRangeValue", {"rangeValue": 0}
)
self._semantics.add_action_to_directive(
raise_labels, "SetRangeValue", {"rangeValue": 100}
)
return self._semantics.serialize_semantics()
# Cover Tilt
if self.instance == f"{cover.DOMAIN}.tilt":
self._semantics = AlexaSemantics()
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_CLOSE], "SetRangeValue", {"rangeValue": 0}
)
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_OPEN], "SetRangeValue", {"rangeValue": 100}
)
self._semantics.add_states_to_value([AlexaSemantics.STATES_CLOSED], value=0)
self._semantics.add_states_to_range(
[AlexaSemantics.STATES_OPEN], min_value=1, max_value=100
)
return self._semantics.serialize_semantics()
return None
class AlexaToggleController(AlexaCapability):
    """Implements Alexa.ToggleController.

    The instance property must be unique across ModeController, RangeController,
    ToggleController within the same device, should be a concatenation of the
    device domain, a period, and a single word (e.g. fan.speed, fan.direction),
    and must not contain words from other instance strings on the same device
    (e.g. cover.position together with cover.tilt_position breaks discovery).
    The same instance string may be reused on different devices.

    https://developer.amazon.com/docs/device-apis/alexa-togglecontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "es-MX",
        "fr-CA",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def __init__(self, entity, instance, non_controllable=False):
        """Initialize the capability for a given toggle instance."""
        super().__init__(entity, instance)
        self._resource = None
        self._semantics = None
        # Expose the flag through the base-class hook.
        self.properties_non_controllable = lambda: non_controllable

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.ToggleController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "toggleState"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report the current toggleState for the configured instance."""
        if name != "toggleState":
            raise UnsupportedProperty(name)
        # Fan Oscillating
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
            oscillating = self.entity.attributes.get(fan.ATTR_OSCILLATING)
            return "ON" if oscillating else "OFF"
        return None

    def capability_resources(self):
        """Serialize friendly names for the configured instance."""
        # Fan Oscillating Resource
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
            self._resource = AlexaCapabilityResource(
                [AlexaGlobalCatalog.SETTING_OSCILLATE, "Rotate", "Rotation"]
            )
            return self._resource.serialize_capability_resources()
        return None
class AlexaChannelController(AlexaCapability):
    """Implements Alexa.ChannelController (channel change directives).

    https://developer.amazon.com/docs/device-apis/alexa-channelcontroller.html
    """

    supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.ChannelController"
class AlexaDoorbellEventSource(AlexaCapability):
    """Implements Alexa.DoorbellEventSource.

    Proactively reports doorbell press events; has no queryable properties.

    https://developer.amazon.com/docs/device-apis/alexa-doorbelleventsource.html
    """

    # Fix: the original set literal listed "en-US" twice; a set silently
    # deduplicates, but the duplicate entry was a code smell.
    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "es-MX",
        "fr-CA",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.DoorbellEventSource"

    def capability_proactively_reported(self):
        """Return True for proactively reported capability."""
        return True
class AlexaPlaybackStateReporter(AlexaCapability):
    """Implements Alexa.PlaybackStateReporter.

    https://developer.amazon.com/docs/device-apis/alexa-playbackstatereporter.html
    """

    supported_locales = {"de-DE", "en-GB", "en-US", "fr-FR"}

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.PlaybackStateReporter"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "playbackState"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Map the entity state onto an Alexa playback state object."""
        if name != "playbackState":
            raise UnsupportedProperty(name)
        # Anything that is not playing or paused is reported as stopped.
        state_map = {STATE_PLAYING: "PLAYING", STATE_PAUSED: "PAUSED"}
        return {"state": state_map.get(self.entity.state, "STOPPED")}
class AlexaSeekController(AlexaCapability):
    """Implements Alexa.SeekController (media seek directives).

    https://developer.amazon.com/docs/device-apis/alexa-seekcontroller.html
    """

    supported_locales = {"de-DE", "en-GB", "en-US"}

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.SeekController"
class AlexaEventDetectionSensor(AlexaCapability):
    """Implements Alexa.EventDetectionSensor.

    https://developer.amazon.com/docs/device-apis/alexa-eventdetectionsensor.html
    """

    supported_locales = {"en-US"}

    def __init__(self, hass, entity):
        """Initialize the capability with a hass reference."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.EventDetectionSensor"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "humanPresenceDetectionState"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def get_property(self, name):
        """Report whether human presence is currently detected."""
        if name != "humanPresenceDetectionState":
            raise UnsupportedProperty(name)

        state = self.entity.state
        # Unavailable/unknown states are reported as None so the
        # Alexa.EndpointHealth interface can surface them instead.
        if state in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
            return None

        detected = False
        if self.entity.domain == image_processing.DOMAIN:
            # image_processing states are numeric detection counts.
            detected = bool(int(state))
        elif state == STATE_ON:
            detected = True
        return {"value": "DETECTED" if detected else "NOT_DETECTED"}

    def configuration(self):
        """Advertise the supported detection methods and modes."""
        return {
            "detectionMethods": ["AUDIO", "VIDEO"],
            "detectionModes": {
                "humanPresence": {
                    "featureAvailability": "ENABLED",
                    "supportsNotDetected": True,
                }
            },
        }
class AlexaEqualizerController(AlexaCapability):
    """Implements Alexa.EqualizerController.

    https://developer.amazon.com/en-US/docs/alexa/device-apis/alexa-equalizercontroller.html
    """

    supported_locales = {"en-US"}

    # Sound modes the Alexa API accepts for the equalizer "mode" property.
    VALID_SOUND_MODES = {
        "MOVIE",
        "MUSIC",
        "NIGHT",
        "SPORT",
        "TV",
    }

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.EqualizerController"

    def properties_supported(self):
        """List the properties exposed by this interface.

        Either bands, mode or both can be specified. Only mode is supported at this time.
        """
        return [{"name": "mode"}]

    def get_property(self, name):
        """Report the current sound mode when it is a valid Alexa mode."""
        if name != "mode":
            raise UnsupportedProperty(name)
        current = self.entity.attributes.get(media_player.ATTR_SOUND_MODE)
        if not current:
            return None
        mode = current.upper()
        return mode if mode in self.VALID_SOUND_MODES else None

    def configurations(self):
        """Build the configurations object listing supported sound modes."""
        supported = self.get_valid_inputs(
            self.entity.attributes.get(media_player.ATTR_SOUND_MODE_LIST, [])
        )
        if supported:
            return {"modes": {"supported": supported}}
        return None

    @classmethod
    def get_valid_inputs(cls, sound_mode_list):
        """Filter a sound-mode list down to Alexa-supported entries."""
        return [
            {"name": candidate}
            for candidate in (mode.upper() for mode in sound_mode_list)
            if candidate in cls.VALID_SOUND_MODES
        ]
class AlexaTimeHoldController(AlexaCapability):
    """Implements Alexa.TimeHoldController.

    https://developer.amazon.com/docs/device-apis/alexa-timeholdcontroller.html
    """

    supported_locales = {"en-US"}

    def __init__(self, entity, allow_remote_resume=False):
        """Initialize the capability, remembering whether Alexa may resume."""
        super().__init__(entity)
        self._allow_remote_resume = allow_remote_resume

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.TimeHoldController"

    def configuration(self):
        """Build the configuration object.

        allowRemoteResume is True when Alexa can restart the operation on the
        device; when False, Alexa does not send the Resume directive.
        """
        return {"allowRemoteResume": self._allow_remote_resume}
class AlexaCameraStreamController(AlexaCapability):
    """Implements Alexa.CameraStreamController.

    https://developer.amazon.com/docs/device-apis/alexa-camerastreamcontroller.html
    """

    supported_locales = {
        "de-DE",
        "en-AU",
        "en-CA",
        "en-GB",
        "en-IN",
        "en-US",
        "es-ES",
        "fr-FR",
        "it-IT",
        "ja-JP",
    }

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.CameraStreamController"

    def camera_stream_configurations(self):
        """Advertise the single HLS/720p stream configuration."""
        stream_config = {
            "protocols": ["HLS"],
            "resolutions": [{"width": 1280, "height": 720}],
            "authorizationTypes": ["NONE"],
            "videoCodecs": ["H264"],
            "audioCodecs": ["AAC"],
        }
        return [stream_config]
| {
"repo_name": "sdague/home-assistant",
"path": "homeassistant/components/alexa/capabilities.py",
"copies": "3",
"size": "61105",
"license": "apache-2.0",
"hash": 3597684275242113000,
"line_mean": 30.3358974359,
"line_max": 127,
"alpha_frac": 0.5954831847,
"autogenerated": false,
"ratio": 4.26056338028169,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.635604656498169,
"avg_score": null,
"num_lines": null
} |
"""Alexa capabilities."""
import logging
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_CLOSED,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
import homeassistant.components.climate.const as climate
import homeassistant.components.media_player.const as media_player
from homeassistant.components.alarm_control_panel import ATTR_CODE_FORMAT, FORMAT_NUMBER
from homeassistant.components import light, fan, cover
import homeassistant.util.color as color_util
import homeassistant.util.dt as dt_util
from .const import (
Catalog,
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
RANGE_FAN_MAP,
Inputs,
)
from .errors import UnsupportedProperty
_LOGGER = logging.getLogger(__name__)
class AlexaCapability:
    """Base class for Alexa capability interfaces.
    The Smart Home Skills API defines a number of "capability interfaces",
    roughly analogous to domains in Home Assistant. The supported interfaces
    describe what actions can be performed on a particular device.
    https://developer.amazon.com/docs/device-apis/message-guide.html
    """
    def __init__(self, entity, instance=None):
        """Initialize an Alexa capability.

        entity: the Home Assistant state object this capability wraps.
        instance: optional instance name for multi-instance interfaces
        (ModeController, RangeController, ToggleController).
        """
        self.entity = entity
        self.instance = instance
    def name(self):
        """Return the Alexa API name of this interface."""
        raise NotImplementedError
    @staticmethod
    def properties_supported():
        """Return what properties this entity supports."""
        return []
    @staticmethod
    def properties_proactively_reported():
        """Return True if properties asynchronously reported."""
        return False
    @staticmethod
    def properties_retrievable():
        """Return True if properties can be retrieved."""
        return False
    @staticmethod
    def properties_non_controllable():
        """Return True if non controllable."""
        return None
    @staticmethod
    def get_property(name):
        """Read and return a property.
        Return value should be a dict, or raise UnsupportedProperty.
        Properties can also have a timeOfSample and uncertaintyInMilliseconds,
        but returning those metadata is not yet implemented.
        """
        raise UnsupportedProperty(name)
    @staticmethod
    def supports_deactivation():
        """Applicable only to scenes."""
        return None
    @staticmethod
    def capability_proactively_reported():
        """Return True if the capability is proactively reported.
        Set properties_proactively_reported() for proactively reported properties.
        Applicable to DoorbellEventSource.
        """
        return None
    @staticmethod
    def capability_resources():
        """Applicable to ToggleController, RangeController, and ModeController interfaces."""
        return []
    @staticmethod
    def configuration():
        """Return the Configuration object."""
        return []
    @staticmethod
    def inputs():
        """Applicable only to media players."""
        return []
    @staticmethod
    def supported_operations():
        """Return the supportedOperations object."""
        return []
    def serialize_discovery(self):
        """Serialize according to the Discovery API.

        Builds the AlexaInterface dict for this capability, including only
        the optional sections (properties, capabilityResources, configuration,
        instance, supportedOperations, inputs) that subclasses provide.
        """
        result = {"type": "AlexaInterface", "interface": self.name(), "version": "3"}
        properties_supported = self.properties_supported()
        if properties_supported:
            result["properties"] = {
                "supported": self.properties_supported(),
                "proactivelyReported": self.properties_proactively_reported(),
                "retrievable": self.properties_retrievable(),
            }
        # pylint: disable=assignment-from-none
        proactively_reported = self.capability_proactively_reported()
        if proactively_reported is not None:
            result["proactivelyReported"] = proactively_reported
        # pylint: disable=assignment-from-none
        # NOTE(review): this assumes properties_supported() was non-empty
        # whenever non_controllable is set; a non-controllable capability
        # with no properties would raise KeyError here — confirm intended.
        non_controllable = self.properties_non_controllable()
        if non_controllable is not None:
            result["properties"]["nonControllable"] = non_controllable
        # pylint: disable=assignment-from-none
        supports_deactivation = self.supports_deactivation()
        if supports_deactivation is not None:
            result["supportsDeactivation"] = supports_deactivation
        capability_resources = self.serialize_capability_resources()
        if capability_resources:
            result["capabilityResources"] = capability_resources
        configuration = self.configuration()
        if configuration:
            result["configuration"] = configuration
        # pylint: disable=assignment-from-none
        instance = self.instance
        if instance is not None:
            result["instance"] = instance
        supported_operations = self.supported_operations()
        if supported_operations:
            result["supportedOperations"] = supported_operations
        inputs = self.inputs()
        if inputs:
            result["inputs"] = inputs
        return result
    def serialize_properties(self):
        """Return properties serialized for an API response.

        Generator yielding one property dict per supported property whose
        current value is not None.
        """
        for prop in self.properties_supported():
            prop_name = prop["name"]
            # pylint: disable=assignment-from-no-return
            prop_value = self.get_property(prop_name)
            if prop_value is not None:
                result = {
                    "name": prop_name,
                    "namespace": self.name(),
                    "value": prop_value,
                    "timeOfSample": dt_util.utcnow().strftime(DATE_FORMAT),
                    "uncertaintyInMilliseconds": 0,
                }
                instance = self.instance
                if instance is not None:
                    result["instance"] = instance
                yield result
    def serialize_capability_resources(self):
        """Return capabilityResources friendlyNames serialized for an API response."""
        resources = self.capability_resources()
        if resources:
            return {"friendlyNames": self.serialize_friendly_names(resources)}
        return None
    @staticmethod
    def serialize_friendly_names(resources):
        """Return capabilityResources, ModeResources, or presetResources friendlyNames serialized for an API response.

        Asset-type resources reference a catalog assetId; anything else is
        emitted as literal text with a hard-coded en-US locale.
        """
        friendly_names = []
        for resource in resources:
            if resource["type"] == Catalog.LABEL_ASSET:
                friendly_names.append(
                    {
                        "@type": Catalog.LABEL_ASSET,
                        "value": {"assetId": resource["value"]},
                    }
                )
            else:
                friendly_names.append(
                    {
                        "@type": Catalog.LABEL_TEXT,
                        "value": {"text": resource["value"], "locale": "en-US"},
                    }
                )
        return friendly_names
class AlexaEndpointHealth(AlexaCapability):
    """Implements Alexa.EndpointHealth.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
    """

    def __init__(self, hass, entity):
        """Initialize the capability with a hass reference."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.EndpointHealth"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "connectivity"}]

    def properties_proactively_reported(self):
        """Connectivity is not pushed asynchronously."""
        return False

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Derive endpoint connectivity from the entity's availability."""
        if name != "connectivity":
            raise UnsupportedProperty(name)
        reachable = self.entity.state != STATE_UNAVAILABLE
        return {"value": "OK" if reachable else "UNREACHABLE"}
class AlexaPowerController(AlexaCapability):
    """Implements Alexa.PowerController.

    https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.PowerController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "powerState"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report ON/OFF derived from the entity state."""
        if name != "powerState":
            raise UnsupportedProperty(name)
        # Climate entities use their own "off" HVAC mode instead of STATE_OFF.
        if self.entity.domain == climate.DOMAIN:
            off_state = climate.HVAC_MODE_OFF
        else:
            off_state = STATE_OFF
        return "OFF" if self.entity.state == off_state else "ON"
class AlexaLockController(AlexaCapability):
    """Implements Alexa.LockController.

    https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.LockController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "lockState"}]

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def get_property(self, name):
        """Map the lock entity state onto an Alexa lockState value."""
        if name != "lockState":
            raise UnsupportedProperty(name)
        # Any state other than locked/unlocked is reported as JAMMED.
        lock_states = {STATE_LOCKED: "LOCKED", STATE_UNLOCKED: "UNLOCKED"}
        return lock_states.get(self.entity.state, "JAMMED")
class AlexaSceneController(AlexaCapability):
    """Implements Alexa.SceneController.

    https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
    """

    def __init__(self, entity, supports_deactivation):
        """Initialize the capability, capturing whether deactivation works."""
        super().__init__(entity)
        # Shadow the base-class hook with a closure over the flag.
        self.supports_deactivation = lambda: supports_deactivation

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.SceneController"
class AlexaBrightnessController(AlexaCapability):
    """Implements Alexa.BrightnessController.

    https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.BrightnessController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "brightness"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Convert HA brightness (0-255) to an Alexa percentage (0-100)."""
        if name != "brightness":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        if "brightness" not in attributes:
            return 0
        return round(attributes["brightness"] / 255.0 * 100)
class AlexaColorController(AlexaCapability):
    """Implements Alexa.ColorController.

    https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.ColorController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "color"}]

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report the light color as Alexa hue/saturation/brightness."""
        if name != "color":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        hue, saturation = attributes.get(light.ATTR_HS_COLOR, (0, 0))
        # Alexa expects saturation and brightness normalized to 0..1.
        return {
            "hue": hue,
            "saturation": saturation / 100.0,
            "brightness": attributes.get(light.ATTR_BRIGHTNESS, 0) / 255.0,
        }
class AlexaColorTemperatureController(AlexaCapability):
    """Implements Alexa.ColorTemperatureController.

    https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.ColorTemperatureController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "colorTemperatureInKelvin"}]

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Convert the light's mired color temperature to Kelvin."""
        if name != "colorTemperatureInKelvin":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        if "color_temp" not in attributes:
            return None
        return color_util.color_temperature_mired_to_kelvin(
            attributes["color_temp"]
        )
class AlexaPercentageController(AlexaCapability):
    """Implements Alexa.PercentageController.

    https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.PercentageController"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "percentage"}]

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report a 0-100 percentage for fans and covers."""
        if name != "percentage":
            raise UnsupportedProperty(name)
        domain = self.entity.domain
        if domain == fan.DOMAIN:
            # Named fan speeds are mapped to percentages via the lookup table.
            speed = self.entity.attributes.get(fan.ATTR_SPEED)
            return PERCENTAGE_FAN_MAP.get(speed, 0)
        if domain == cover.DOMAIN:
            return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
        return 0
class AlexaSpeaker(AlexaCapability):
    """Implements Alexa.Speaker (absolute volume directives).

    https://developer.amazon.com/docs/device-apis/alexa-speaker.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.Speaker"
class AlexaStepSpeaker(AlexaCapability):
    """Implements Alexa.StepSpeaker (relative volume directives).

    https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
    """

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.StepSpeaker"
class AlexaPlaybackController(AlexaCapability):
    """Implements Alexa.PlaybackController.

    https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PlaybackController"

    def supported_operations(self):
        """Return the supportedOperations object.

        Supported Operations: FastForward, Next, Pause, Play, Previous, Rewind, StartOver, Stop
        """
        supported_features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Map media_player feature bits onto Alexa operation names;
        # dict insertion order determines the serialized order.
        operations = {
            media_player.SUPPORT_NEXT_TRACK: "Next",
            media_player.SUPPORT_PAUSE: "Pause",
            media_player.SUPPORT_PLAY: "Play",
            media_player.SUPPORT_PREVIOUS_TRACK: "Previous",
            media_player.SUPPORT_STOP: "Stop",
        }
        # Fix: iterate items() instead of keys + re-indexing the dict,
        # and build the list with a comprehension (PERF102/PERF401).
        return [
            operation
            for feature, operation in operations.items()
            if feature & supported_features
        ]
class AlexaInputController(AlexaCapability):
    """Implements Alexa.InputController.

    https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.InputController"

    def inputs(self):
        """Return the list of valid supported inputs.

        Normalizes each source name (lowercase, no separators) and keeps
        only sources that map to an Alexa-recognized input name.
        """
        source_list = self.entity.attributes.get(
            media_player.ATTR_INPUT_SOURCE_LIST, []
        )
        input_list = []
        for source in source_list:
            formatted_source = (
                source.lower().replace("-", "").replace("_", "").replace(" ", "")
            )
            # Fix: membership test on the dict directly — `.keys()` was redundant.
            if formatted_source in Inputs.VALID_SOURCE_NAME_MAP:
                input_list.append(
                    {"name": Inputs.VALID_SOURCE_NAME_MAP[formatted_source]}
                )
        return input_list
class AlexaTemperatureSensor(AlexaCapability):
    """Implements Alexa.TemperatureSensor.

    https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the capability with a hass reference."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.TemperatureSensor"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "temperature"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report the current temperature with its scale, or None."""
        if name != "temperature":
            raise UnsupportedProperty(name)

        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        temp = self.entity.state
        if self.entity.domain == climate.DOMAIN:
            # Climate entities report temperature via an attribute,
            # in the hass-configured unit.
            unit = self.hass.config.units.temperature_unit
            temp = self.entity.attributes.get(climate.ATTR_CURRENT_TEMPERATURE)

        if temp in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
            return None

        try:
            value = float(temp)
        except ValueError:
            _LOGGER.warning("Invalid temp value %s for %s", temp, self.entity.entity_id)
            return None
        return {"value": value, "scale": API_TEMP_UNITS[unit]}
class AlexaContactSensor(AlexaCapability):
    """Implements Alexa.ContactSensor.

    Describes the properties and events used to report the state of an
    endpoint that detects contact between two surfaces — e.g. whether a
    door or window is open.

    https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the capability with a hass reference."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.ContactSensor"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report DETECTED when the binary sensor is on."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaMotionSensor(AlexaCapability):
    """Implements Alexa.MotionSensor.

    https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the capability with a hass reference."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Name of the Alexa interface implemented here."""
        return "Alexa.MotionSensor"

    def properties_supported(self):
        """List the properties exposed by this interface."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Properties are pushed to Alexa asynchronously."""
        return True

    def properties_retrievable(self):
        """Alexa may query this interface's properties on demand."""
        return True

    def get_property(self, name):
        """Report DETECTED when the motion sensor is on."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaThermostatController(AlexaCapability):
    """Implements Alexa.ThermostatController.

    https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ThermostatController"

    def properties_supported(self):
        """Return what properties this entity supports.

        thermostatMode is always present; setpoints are added based on
        the entity's supported features.
        """
        properties = [{"name": "thermostatMode"}]
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & climate.SUPPORT_TARGET_TEMPERATURE:
            properties.append({"name": "targetSetpoint"})
        if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
            properties.append({"name": "lowerSetpoint"})
            properties.append({"name": "upperSetpoint"})
        return properties

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Returns None when the entity is unavailable or a setpoint is unset;
        raises UnsupportedProperty for unknown names or unmappable states.
        """
        if self.entity.state == STATE_UNAVAILABLE:
            return None

        if name == "thermostatMode":
            # An active preset (eco, ...) takes precedence over the HVAC mode.
            preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)
            if preset in API_THERMOSTAT_PRESETS:
                mode = API_THERMOSTAT_PRESETS[preset]
            else:
                mode = API_THERMOSTAT_MODES.get(self.entity.state)
                if mode is None:
                    _LOGGER.error(
                        "%s (%s) has unsupported state value '%s'",
                        self.entity.entity_id,
                        type(self.entity),
                        self.entity.state,
                    )
                    raise UnsupportedProperty(name)
            return mode

        unit = self.hass.config.units.temperature_unit
        if name == "targetSetpoint":
            temp = self.entity.attributes.get(ATTR_TEMPERATURE)
        elif name == "lowerSetpoint":
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_LOW)
        elif name == "upperSetpoint":
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_HIGH)
        else:
            raise UnsupportedProperty(name)
        if temp is None:
            return None
        try:
            temp = float(temp)
        except ValueError:
            _LOGGER.warning(
                "Invalid temp value %s for %s in %s", temp, name, self.entity.entity_id
            )
            return None
        return {"value": temp, "scale": API_TEMP_UNITS[unit]}

    def configuration(self):
        """Return configuration object.

        Translates climate HVAC_MODES and PRESETS to supported Alexa ThermostatMode Values.
        ThermostatMode Value must be AUTO, COOL, HEAT, ECO, OFF, or CUSTOM.
        """
        supported_modes = []
        # Fix: guard against a missing/None hvac_modes attribute — iterating
        # None raised TypeError during discovery.
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES) or []
        for mode in hvac_modes:
            thermostat_mode = API_THERMOSTAT_MODES.get(mode)
            if thermostat_mode:
                supported_modes.append(thermostat_mode)
        preset_modes = self.entity.attributes.get(climate.ATTR_PRESET_MODES)
        if preset_modes:
            for mode in preset_modes:
                thermostat_mode = API_THERMOSTAT_PRESETS.get(mode)
                if thermostat_mode:
                    supported_modes.append(thermostat_mode)
        # Return False for supportsScheduling until supported with event listener in handler.
        configuration = {"supportsScheduling": False}
        if supported_modes:
            configuration["supportedModes"] = supported_modes
        return configuration
class AlexaPowerLevelController(AlexaCapability):
    """Implements Alexa.PowerLevelController.

    https://developer.amazon.com/docs/device-apis/alexa-powerlevelcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PowerLevelController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "powerLevel"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "powerLevel":
            raise UnsupportedProperty(name)
        if self.entity.domain != fan.DOMAIN:
            return None
        # Translate the fan's named speed to an Alexa percentage;
        # unknown speeds yield None (property is then skipped).
        speed = self.entity.attributes.get(fan.ATTR_SPEED)
        return PERCENTAGE_FAN_MAP.get(speed)
class AlexaSecurityPanelController(AlexaCapability):
    """Implements Alexa.SecurityPanelController.

    https://developer.amazon.com/docs/device-apis/alexa-securitypanelcontroller.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.SecurityPanelController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "armState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "armState":
            raise UnsupportedProperty(name)
        # Map HA alarm states to Alexa arm states; custom-bypass is reported
        # as ARMED_STAY and anything unrecognized as DISARMED.
        arm_state_map = {
            STATE_ALARM_ARMED_HOME: "ARMED_STAY",
            STATE_ALARM_ARMED_AWAY: "ARMED_AWAY",
            STATE_ALARM_ARMED_NIGHT: "ARMED_NIGHT",
            STATE_ALARM_ARMED_CUSTOM_BYPASS: "ARMED_STAY",
        }
        return arm_state_map.get(self.entity.state, "DISARMED")

    def configuration(self):
        """Return configuration object with supported authorization types."""
        if self.entity.attributes.get(ATTR_CODE_FORMAT) == FORMAT_NUMBER:
            return {"supportedAuthorizationTypes": [{"type": "FOUR_DIGIT_PIN"}]}
        return None
class AlexaModeController(AlexaCapability):
    """Implements Alexa.ModeController.

    https://developer.amazon.com/docs/device-apis/alexa-modecontroller.html
    """

    def __init__(self, entity, instance, non_controllable=False):
        """Initialize the entity.

        `instance` identifies which mode this controller exposes
        (e.g. "fan.direction" or "cover.position").
        """
        super().__init__(entity, instance)
        # Shadows the base-class method with a closure over the flag.
        self.properties_non_controllable = lambda: non_controllable

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ModeController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "mode"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        # Fix: this method previously had no return statement and fell
        # through to None, which serialized as `"retrievable": null` in the
        # discovery response.  Every sibling capability returns True here.
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "mode":
            raise UnsupportedProperty(name)
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
            return self.entity.attributes.get(fan.ATTR_DIRECTION)
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            return self.entity.attributes.get(cover.ATTR_POSITION)
        return None

    def configuration(self):
        """Return configuration with modeResources."""
        return self.serialize_mode_resources()

    def capability_resources(self):
        """Return capabilityResources object."""
        capability_resources = []
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
            capability_resources = [
                {"type": Catalog.LABEL_ASSET, "value": Catalog.SETTING_DIRECTION}
            ]
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            capability_resources = [
                {"type": Catalog.LABEL_ASSET, "value": Catalog.SETTING_MODE},
                {"type": Catalog.LABEL_ASSET, "value": Catalog.SETTING_PRESET},
            ]
        return capability_resources

    def mode_resources(self):
        """Return modeResources object.

        Returns None for instances other than fan direction and cover
        position.
        """
        mode_resources = None
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
            mode_resources = {
                "ordered": False,
                "resources": [
                    {
                        "value": f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_FORWARD}",
                        "friendly_names": [
                            {"type": Catalog.LABEL_TEXT, "value": fan.DIRECTION_FORWARD}
                        ],
                    },
                    {
                        "value": f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_REVERSE}",
                        "friendly_names": [
                            {"type": Catalog.LABEL_TEXT, "value": fan.DIRECTION_REVERSE}
                        ],
                    },
                ],
            }
        if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
            mode_resources = {
                "ordered": False,
                "resources": [
                    {
                        "value": f"{cover.ATTR_POSITION}.{STATE_OPEN}",
                        "friendly_names": [
                            {"type": Catalog.LABEL_TEXT, "value": "open"},
                            {"type": Catalog.LABEL_TEXT, "value": "opened"},
                            {"type": Catalog.LABEL_TEXT, "value": "raise"},
                            {"type": Catalog.LABEL_TEXT, "value": "raised"},
                        ],
                    },
                    {
                        "value": f"{cover.ATTR_POSITION}.{STATE_CLOSED}",
                        "friendly_names": [
                            {"type": Catalog.LABEL_TEXT, "value": "close"},
                            {"type": Catalog.LABEL_TEXT, "value": "closed"},
                            {"type": Catalog.LABEL_TEXT, "value": "shut"},
                            {"type": Catalog.LABEL_TEXT, "value": "lower"},
                            {"type": Catalog.LABEL_TEXT, "value": "lowered"},
                        ],
                    },
                ],
            }
        return mode_resources

    def serialize_mode_resources(self):
        """Return ModeResources, friendlyNames serialized for an API response."""
        # NOTE(review): assumes mode_resources() is non-None, i.e. that this
        # controller is only ever instantiated for fan direction or cover
        # position — TODO confirm at the construction sites.
        mode_resources = []
        resources = self.mode_resources()
        ordered = resources["ordered"]
        for resource in resources["resources"]:
            mode_resources.append(
                {
                    "value": resource["value"],
                    "modeResources": {
                        "friendlyNames": self.serialize_friendly_names(
                            resource["friendly_names"]
                        )
                    },
                }
            )
        return {"ordered": ordered, "supportedModes": mode_resources}
class AlexaRangeController(AlexaCapability):
    """Implements Alexa.RangeController.

    https://developer.amazon.com/docs/device-apis/alexa-rangecontroller.html
    """

    def __init__(self, entity, instance, non_controllable=False):
        """Initialize the entity."""
        super().__init__(entity, instance)
        self.properties_non_controllable = lambda: non_controllable

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.RangeController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "rangeValue"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "rangeValue":
            raise UnsupportedProperty(name)
        if self.instance != f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
            return None
        # Map the fan's named speed to a numeric range value (0 if unknown).
        speed = self.entity.attributes.get(fan.ATTR_SPEED)
        return RANGE_FAN_MAP.get(speed, 0)

    def configuration(self):
        """Return configuration with presetResources."""
        return self.serialize_preset_resources()

    def capability_resources(self):
        """Return capabilityResources object."""
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
            return [{"type": Catalog.LABEL_ASSET, "value": Catalog.SETTING_FANSPEED}]
        return []

    def preset_resources(self):
        """Return presetResources object."""
        preset_resources = []
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
            preset_resources = {
                "minimumValue": 1,
                "maximumValue": 3,
                "precision": 1,
                "presets": [
                    {
                        "rangeValue": 1,
                        "names": [
                            {
                                "type": Catalog.LABEL_ASSET,
                                "value": Catalog.VALUE_MINIMUM,
                            },
                            {"type": Catalog.LABEL_ASSET, "value": Catalog.VALUE_LOW},
                        ],
                    },
                    {
                        "rangeValue": 2,
                        "names": [
                            {"type": Catalog.LABEL_ASSET, "value": Catalog.VALUE_MEDIUM}
                        ],
                    },
                    {
                        "rangeValue": 3,
                        "names": [
                            {
                                "type": Catalog.LABEL_ASSET,
                                "value": Catalog.VALUE_MAXIMUM,
                            },
                            {"type": Catalog.LABEL_ASSET, "value": Catalog.VALUE_HIGH},
                        ],
                    },
                ],
            }
        return preset_resources

    def serialize_preset_resources(self):
        """Return PresetResources, friendlyNames serialized for an API response."""
        # NOTE(review): assumes preset_resources() returned the dict form,
        # i.e. the instance is fan speed — TODO confirm at construction sites.
        resources = self.preset_resources()
        serialized_presets = [
            {
                "rangeValue": preset["rangeValue"],
                "presetResources": {
                    "friendlyNames": self.serialize_friendly_names(preset["names"])
                },
            }
            for preset in resources["presets"]
        ]
        return {
            "supportedRange": {
                "minimumValue": resources["minimumValue"],
                "maximumValue": resources["maximumValue"],
                "precision": resources["precision"],
            },
            "presets": serialized_presets,
        }
class AlexaToggleController(AlexaCapability):
    """Implements Alexa.ToggleController.

    https://developer.amazon.com/docs/device-apis/alexa-togglecontroller.html
    """

    def __init__(self, entity, instance, non_controllable=False):
        """Initialize the entity."""
        super().__init__(entity, instance)
        self.properties_non_controllable = lambda: non_controllable

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ToggleController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "toggleState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "toggleState":
            raise UnsupportedProperty(name)
        if self.instance != f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
            return None
        # Missing/None oscillating attribute is treated as OFF.
        return "ON" if self.entity.attributes.get(fan.ATTR_OSCILLATING) else "OFF"

    def capability_resources(self):
        """Return capabilityResources object."""
        if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
            return [
                {"type": Catalog.LABEL_ASSET, "value": Catalog.SETTING_OSCILLATE},
                {"type": Catalog.LABEL_TEXT, "value": "Rotate"},
                {"type": Catalog.LABEL_TEXT, "value": "Rotation"},
            ]
        return []
class AlexaChannelController(AlexaCapability):
    """Implements Alexa.ChannelController.

    https://developer.amazon.com/docs/device-apis/alexa-channelcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ChannelController"
class AlexaDoorbellEventSource(AlexaCapability):
    """Implements Alexa.DoorbellEventSource.

    https://developer.amazon.com/docs/device-apis/alexa-doorbelleventsource.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.DoorbellEventSource"

    def capability_proactively_reported(self):
        """Return True for proactively reported capability."""
        return True
class AlexaPlaybackStateReporter(AlexaCapability):
    """Implements Alexa.PlaybackStateReporter.

    https://developer.amazon.com/docs/device-apis/alexa-playbackstatereporter.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PlaybackStateReporter"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "playbackState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "playbackState":
            raise UnsupportedProperty(name)
        # Any state other than playing/paused is reported as STOPPED.
        state_map = {STATE_PLAYING: "PLAYING", STATE_PAUSED: "PAUSED"}
        return {"state": state_map.get(self.entity.state, "STOPPED")}
class AlexaSeekController(AlexaCapability):
    """Implements Alexa.SeekController.

    https://developer.amazon.com/docs/device-apis/alexa-seekcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.SeekController"
| {
"repo_name": "qedi-r/home-assistant",
"path": "homeassistant/components/alexa/capabilities.py",
"copies": "1",
"size": "41019",
"license": "apache-2.0",
"hash": -8649807002126222000,
"line_mean": 31.736632083,
"line_max": 127,
"alpha_frac": 0.5964796801,
"autogenerated": false,
"ratio": 4.559693196976434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5656172877076433,
"avg_score": null,
"num_lines": null
} |
"""Alexa capabilities."""
import logging
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
STATE_UNKNOWN,
)
import homeassistant.components.climate.const as climate
from homeassistant.components import light, fan, cover
import homeassistant.util.color as color_util
import homeassistant.util.dt as dt_util
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
)
from .errors import UnsupportedProperty
_LOGGER = logging.getLogger(__name__)
class AlexaCapibility:
    """Base class for Alexa capability interfaces.

    NOTE(review): the class name misspells "Capability"; it is kept as-is
    because every capability subclass in this module inherits from it.

    The Smart Home Skills API defines a number of "capability interfaces",
    roughly analogous to domains in Home Assistant. The supported interfaces
    describe what actions can be performed on a particular device.

    https://developer.amazon.com/docs/device-apis/message-guide.html
    """

    def __init__(self, entity):
        """Initialize an Alexa capibility."""
        self.entity = entity

    def name(self):
        """Return the Alexa API name of this interface."""
        raise NotImplementedError

    @staticmethod
    def properties_supported():
        """Return what properties this entity supports."""
        return []

    @staticmethod
    def properties_proactively_reported():
        """Return True if properties asynchronously reported."""
        return False

    @staticmethod
    def properties_retrievable():
        """Return True if properties can be retrieved."""
        return False

    @staticmethod
    def get_property(name):
        """Read and return a property.

        Return value should be a dict, or raise UnsupportedProperty.

        Properties can also have a timeOfSample and uncertaintyInMilliseconds,
        but returning those metadata is not yet implemented.
        """
        raise UnsupportedProperty(name)

    @staticmethod
    def supports_deactivation():
        """Applicable only to scenes."""
        return None

    def serialize_discovery(self):
        """Serialize according to the Discovery API."""
        result = {
            "type": "AlexaInterface",
            "interface": self.name(),
            "version": "3",
            "properties": {
                "supported": self.properties_supported(),
                "proactivelyReported": self.properties_proactively_reported(),
                "retrievable": self.properties_retrievable(),
            },
        }

        # pylint: disable=assignment-from-none
        deactivation = self.supports_deactivation()
        if deactivation is not None:
            result["supportsDeactivation"] = deactivation
        return result

    def serialize_properties(self):
        """Return properties serialized for an API response."""
        for prop in self.properties_supported():
            prop_name = prop["name"]
            # pylint: disable=assignment-from-no-return
            prop_value = self.get_property(prop_name)
            # None values are skipped rather than reported.
            if prop_value is None:
                continue
            yield {
                "name": prop_name,
                "namespace": self.name(),
                "value": prop_value,
                "timeOfSample": dt_util.utcnow().strftime(DATE_FORMAT),
                "uncertaintyInMilliseconds": 0,
            }
class AlexaEndpointHealth(AlexaCapibility):
    """Implements Alexa.EndpointHealth.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.EndpointHealth"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "connectivity"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return False

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "connectivity":
            raise UnsupportedProperty(name)
        is_reachable = self.entity.state != STATE_UNAVAILABLE
        return {"value": "OK" if is_reachable else "UNREACHABLE"}
class AlexaPowerController(AlexaCapibility):
    """Implements Alexa.PowerController.

    https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PowerController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "powerState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "powerState":
            raise UnsupportedProperty(name)
        # Climate entities use their own "off" mode; everything else is
        # compared against the generic STATE_OFF.
        if self.entity.domain == climate.DOMAIN:
            off_state = climate.HVAC_MODE_OFF
        else:
            off_state = STATE_OFF
        return "OFF" if self.entity.state == off_state else "ON"
class AlexaLockController(AlexaCapibility):
    """Implements Alexa.LockController.

    https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.LockController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "lockState"}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "lockState":
            raise UnsupportedProperty(name)
        # Anything that is neither locked nor unlocked reports as JAMMED.
        lock_states = {STATE_LOCKED: "LOCKED", STATE_UNLOCKED: "UNLOCKED"}
        return lock_states.get(self.entity.state, "JAMMED")
class AlexaSceneController(AlexaCapibility):
    """Implements Alexa.SceneController.

    https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
    """

    def __init__(self, entity, supports_deactivation):
        """Initialize the entity."""
        super().__init__(entity)
        # Shadows the base-class staticmethod with a closure over the flag.
        self.supports_deactivation = lambda: supports_deactivation

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.SceneController"
class AlexaBrightnessController(AlexaCapibility):
    """Implements Alexa.BrightnessController.

    https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.BrightnessController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "brightness"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "brightness":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        if "brightness" not in attributes:
            return 0
        # Convert HA's 0-255 brightness to Alexa's 0-100 scale.
        return round(attributes["brightness"] / 255.0 * 100)
class AlexaColorController(AlexaCapibility):
    """Implements Alexa.ColorController.

    https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ColorController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "color"}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "color":
            raise UnsupportedProperty(name)
        hue, saturation = self.entity.attributes.get(light.ATTR_HS_COLOR, (0, 0))
        brightness = self.entity.attributes.get(light.ATTR_BRIGHTNESS, 0)
        # Alexa expects saturation and brightness normalized to 0..1.
        return {
            "hue": hue,
            "saturation": saturation / 100.0,
            "brightness": brightness / 255.0,
        }
class AlexaColorTemperatureController(AlexaCapibility):
    """Implements Alexa.ColorTemperatureController.

    https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ColorTemperatureController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "colorTemperatureInKelvin"}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "colorTemperatureInKelvin":
            raise UnsupportedProperty(name)
        attributes = self.entity.attributes
        if "color_temp" not in attributes:
            return 0
        # HA stores color temperature in mireds; Alexa wants Kelvin.
        return color_util.color_temperature_mired_to_kelvin(
            attributes["color_temp"]
        )
class AlexaPercentageController(AlexaCapibility):
    """Implements Alexa.PercentageController.

    https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PercentageController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "percentage"}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "percentage":
            raise UnsupportedProperty(name)
        domain = self.entity.domain
        if domain == fan.DOMAIN:
            # Named fan speed is mapped to a percentage; unknown speeds -> 0.
            speed = self.entity.attributes.get(fan.ATTR_SPEED)
            return PERCENTAGE_FAN_MAP.get(speed, 0)
        if domain == cover.DOMAIN:
            return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
        return 0
class AlexaSpeaker(AlexaCapibility):
    """Implements Alexa.Speaker.

    https://developer.amazon.com/docs/device-apis/alexa-speaker.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.Speaker"
class AlexaStepSpeaker(AlexaCapibility):
    """Implements Alexa.StepSpeaker.

    https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.StepSpeaker"
class AlexaPlaybackController(AlexaCapibility):
    """Implements Alexa.PlaybackController.

    https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.PlaybackController"
class AlexaInputController(AlexaCapibility):
    """Implements Alexa.InputController.

    https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.InputController"
class AlexaTemperatureSensor(AlexaCapibility):
    """Implements Alexa.TemperatureSensor.

    https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.TemperatureSensor"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "temperature"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "temperature":
            raise UnsupportedProperty(name)
        if self.entity.domain == climate.DOMAIN:
            # Climate entities report current temperature as an attribute,
            # in the system-configured unit.
            unit = self.hass.config.units.temperature_unit
            temp = self.entity.attributes.get(climate.ATTR_CURRENT_TEMPERATURE)
        else:
            unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
            temp = self.entity.state
        if temp in (STATE_UNAVAILABLE, STATE_UNKNOWN):
            return None
        try:
            temp = float(temp)
        except ValueError:
            _LOGGER.warning("Invalid temp value %s for %s", temp, self.entity.entity_id)
            return None
        return {"value": temp, "scale": API_TEMP_UNITS[unit]}
class AlexaContactSensor(AlexaCapibility):
    """Implements Alexa.ContactSensor.

    The Alexa.ContactSensor interface describes the properties and events used
    to report the state of an endpoint that detects contact between two
    surfaces. For example, a contact sensor can report whether a door or window
    is open.

    https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ContactSensor"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaMotionSensor(AlexaCapibility):
    """Implements Alexa.MotionSensor.

    https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.MotionSensor"

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{"name": "detectionState"}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        if name != "detectionState":
            raise UnsupportedProperty(name)
        return "DETECTED" if self.entity.state == STATE_ON else "NOT_DETECTED"
class AlexaThermostatController(AlexaCapibility):
    """Implements Alexa.ThermostatController.

    https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return "Alexa.ThermostatController"

    def properties_supported(self):
        """Return what properties this entity supports."""
        properties = [{"name": "thermostatMode"}]
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & climate.SUPPORT_TARGET_TEMPERATURE:
            properties.append({"name": "targetSetpoint"})
        if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
            properties.append({"name": "lowerSetpoint"})
            properties.append({"name": "upperSetpoint"})
        return properties

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property."""
        # Fix: guard against unavailable entities.  Without this, an
        # unavailable climate entity would fail the thermostatMode lookup,
        # log an error, and raise UnsupportedProperty instead of being
        # gracefully skipped (None values are omitted by the serializer).
        if self.entity.state == STATE_UNAVAILABLE:
            return None

        if name == "thermostatMode":
            preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)
            if preset in API_THERMOSTAT_PRESETS:
                mode = API_THERMOSTAT_PRESETS[preset]
            else:
                mode = API_THERMOSTAT_MODES.get(self.entity.state)
                if mode is None:
                    _LOGGER.error(
                        "%s (%s) has unsupported state value '%s'",
                        self.entity.entity_id,
                        type(self.entity),
                        self.entity.state,
                    )
                    raise UnsupportedProperty(name)
            return mode

        unit = self.hass.config.units.temperature_unit
        if name == "targetSetpoint":
            temp = self.entity.attributes.get(ATTR_TEMPERATURE)
        elif name == "lowerSetpoint":
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_LOW)
        elif name == "upperSetpoint":
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_HIGH)
        else:
            raise UnsupportedProperty(name)

        if temp is None:
            return None
        try:
            temp = float(temp)
        except ValueError:
            _LOGGER.warning(
                "Invalid temp value %s for %s in %s", temp, name, self.entity.entity_id
            )
            return None
        return {"value": temp, "scale": API_TEMP_UNITS[unit]}
| {
"repo_name": "Cinntax/home-assistant",
"path": "homeassistant/components/alexa/capabilities.py",
"copies": "1",
"size": "19019",
"license": "apache-2.0",
"hash": 5174968461482526000,
"line_mean": 30.0261011419,
"line_max": 127,
"alpha_frac": 0.6276355224,
"autogenerated": false,
"ratio": 4.407647740440324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5535283262840324,
"avg_score": null,
"num_lines": null
} |
"""Alexa configuration for Home Assistant Cloud."""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
import aiohttp
import async_timeout
from hass_nabucasa import Cloud, cloud_api
from homeassistant.components.alexa import (
config as alexa_config,
entities as alexa_entities,
errors as alexa_errors,
state_report as alexa_state_report,
)
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES, HTTP_BAD_REQUEST
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.helpers import entity_registry
from homeassistant.helpers.event import async_call_later
from homeassistant.util.dt import utcnow
from .const import CONF_ENTITY_CONFIG, CONF_FILTER, PREF_SHOULD_EXPOSE, RequireRelink
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
# Time to wait when entity preferences have changed before syncing it to
# the cloud.
SYNC_DELAY = 1
class AlexaConfig(alexa_config.AbstractConfig):
"""Alexa Configuration."""
def __init__(
    self,
    hass: HomeAssistant,
    config: dict,
    cloud_user: str,
    prefs: CloudPreferences,
    cloud: Cloud,
):
    """Initialize the Alexa config."""
    super().__init__(hass)
    self._config = config
    self._cloud_user = cloud_user
    self._prefs = prefs
    self._cloud = cloud
    # Cached Alexa access token and its expiry time.
    self._token = None
    self._token_valid = None
    # Snapshots of the entity prefs used to detect changes on update.
    self._cur_entity_prefs = prefs.alexa_entity_configs
    self._cur_default_expose = prefs.alexa_default_expose
    self._alexa_sync_unsub = None
    self._endpoint = None

    prefs.async_listen_updates(self._async_prefs_updated)
    hass.bus.async_listen(
        entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
        self._handle_entity_registry_updated,
    )
@property
def enabled(self):
    """Return if Alexa is enabled."""
    # Requires an active, unexpired cloud login and the user preference on.
    if not self._cloud.is_logged_in:
        return False
    if self._cloud.subscription_expired:
        return False
    return self._prefs.alexa_enabled
@property
def supports_auth(self):
    """Return if config supports auth."""
    return True
@property
def should_report_state(self):
    """Return if states should be proactively reported."""
    return self._prefs.alexa_report_state
@property
def endpoint(self):
"""Endpoint for report state."""
if self._endpoint is None:
raise ValueError("No endpoint available. Fetch access token first")
return self._endpoint
@property
def locale(self):
"""Return config locale."""
# Not clear how to determine locale atm.
return "en-US"
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@callback
def user_identifier(self):
"""Return an identifier for the user that represents this config."""
return self._cloud_user
def should_expose(self, entity_id):
"""If an entity should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config[CONF_FILTER].empty_filter:
return self._config[CONF_FILTER](entity_id)
entity_configs = self._prefs.alexa_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
default_expose = self._prefs.alexa_default_expose
# Backwards compat
if default_expose is None:
return True
return split_entity_id(entity_id)[0] in default_expose
@callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
self._token_valid = None
async def async_get_access_token(self):
"""Get an access token."""
if self._token_valid is not None and self._token_valid > utcnow():
return self._token
resp = await cloud_api.async_alexa_access_token(self._cloud)
body = await resp.json()
if resp.status == HTTP_BAD_REQUEST:
if body["reason"] in ("RefreshTokenNotFound", "UnknownRegion"):
if self.should_report_state:
await self._prefs.async_update(alexa_report_state=False)
self.hass.components.persistent_notification.async_create(
f"There was an error reporting state to Alexa ({body['reason']}). "
"Please re-link your Alexa skill via the Alexa app to "
"continue using it.",
"Alexa state reporting disabled",
"cloud_alexa_report",
)
raise RequireRelink
raise alexa_errors.NoTokenAvailable
self._token = body["access_token"]
self._endpoint = body["event_endpoint"]
self._token_valid = utcnow() + timedelta(seconds=body["expires_in"])
return self._token
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if self.should_report_state != self.is_reporting_states:
if self.should_report_state:
await self.async_enable_proactive_mode()
else:
await self.async_disable_proactive_mode()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities()
return
# If user has filter in config.yaml, don't sync.
if not self._config[CONF_FILTER].empty_filter:
return
# If entity prefs are the same, don't sync.
if (
self._cur_entity_prefs is prefs.alexa_entity_configs
and self._cur_default_expose is prefs.alexa_default_expose
):
return
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = None
if self._cur_default_expose is not prefs.alexa_default_expose:
await self.async_sync_entities()
return
self._alexa_sync_unsub = async_call_later(
self.hass, SYNC_DELAY, self._sync_prefs
)
async def _sync_prefs(self, _now):
"""Sync the updated preferences to Alexa."""
self._alexa_sync_unsub = None
old_prefs = self._cur_entity_prefs
new_prefs = self._prefs.alexa_entity_configs
seen = set()
to_update = []
to_remove = []
for entity_id, info in old_prefs.items():
seen.add(entity_id)
old_expose = info.get(PREF_SHOULD_EXPOSE)
if entity_id in new_prefs:
new_expose = new_prefs[entity_id].get(PREF_SHOULD_EXPOSE)
else:
new_expose = None
if old_expose == new_expose:
continue
if new_expose:
to_update.append(entity_id)
else:
to_remove.append(entity_id)
# Now all the ones that are in new prefs but never were in old prefs
for entity_id, info in new_prefs.items():
if entity_id in seen:
continue
new_expose = info.get(PREF_SHOULD_EXPOSE)
if new_expose is None:
continue
# Only test if we should expose. It can never be a remove action,
# as it didn't exist in old prefs object.
if new_expose:
to_update.append(entity_id)
# We only set the prefs when update is successful, that way we will
# retry when next change comes in.
if await self._sync_helper(to_update, to_remove):
self._cur_entity_prefs = new_prefs
async def async_sync_entities(self):
"""Sync all entities to Alexa."""
# Remove any pending sync
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = None
to_update = []
to_remove = []
for entity in alexa_entities.async_get_entities(self.hass, self):
if self.should_expose(entity.entity_id):
to_update.append(entity.entity_id)
else:
to_remove.append(entity.entity_id)
return await self._sync_helper(to_update, to_remove)
async def _sync_helper(self, to_update, to_remove) -> bool:
"""Sync entities to Alexa.
Return boolean if it was successful.
"""
if not to_update and not to_remove:
return True
# Make sure it's valid.
await self.async_get_access_token()
tasks = []
if to_update:
tasks.append(
alexa_state_report.async_send_add_or_update_message(
self.hass, self, to_update
)
)
if to_remove:
tasks.append(
alexa_state_report.async_send_delete_message(self.hass, self, to_remove)
)
try:
with async_timeout.timeout(10):
await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
return True
except asyncio.TimeoutError:
_LOGGER.warning("Timeout trying to sync entities to Alexa")
return False
except aiohttp.ClientError as err:
_LOGGER.warning("Error trying to sync entities to Alexa: %s", err)
return False
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
entity_id = event.data["entity_id"]
if not self.should_expose(entity_id):
return
action = event.data["action"]
to_update = []
to_remove = []
if action == "create":
to_update.append(entity_id)
elif action == "remove":
to_remove.append(entity_id)
elif action == "update" and bool(
set(event.data["changes"]) & entity_registry.ENTITY_DESCRIBING_ATTRIBUTES
):
to_update.append(entity_id)
if "old_entity_id" in event.data:
to_remove.append(event.data["old_entity_id"])
with suppress(alexa_errors.NoTokenAvailable):
await self._sync_helper(to_update, to_remove)
| {
"repo_name": "adrienbrault/home-assistant",
"path": "homeassistant/components/cloud/alexa_config.py",
"copies": "3",
"size": "10583",
"license": "mit",
"hash": 8145811865807830000,
"line_mean": 31.3639143731,
"line_max": 91,
"alpha_frac": 0.5880185203,
"autogenerated": false,
"ratio": 4.207952286282306,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013871967653535,
"num_lines": 327
} |
"""Alexa configuration for Home Assistant Cloud."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import async_timeout
from hass_nabucasa import cloud_api
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES
from homeassistant.helpers import entity_registry
from homeassistant.helpers.event import async_call_later
from homeassistant.util.dt import utcnow
from homeassistant.components.alexa import (
config as alexa_config,
errors as alexa_errors,
entities as alexa_entities,
state_report as alexa_state_report,
)
from .const import (
CONF_ENTITY_CONFIG,
CONF_FILTER,
PREF_SHOULD_EXPOSE,
DEFAULT_SHOULD_EXPOSE,
RequireRelink,
)
_LOGGER = logging.getLogger(__name__)
# Time to wait (in seconds) when entity preferences have changed before
# syncing it to the cloud. Debounces rapid consecutive preference changes
# into a single sync.
SYNC_DELAY = 1
class AlexaConfig(alexa_config.AbstractConfig):
    """Alexa Configuration.

    Bridges cloud preferences and the Alexa integration: decides which
    entities are exposed, manages the Alexa access token/endpoint and
    syncs entity changes to Alexa.
    """

    def __init__(self, hass, config, prefs, cloud):
        """Initialize the Alexa config.

        config: the Alexa section of the cloud configuration.
        prefs: CloudPreferences storing user-set Alexa options.
        cloud: the hass_nabucasa Cloud instance.
        """
        super().__init__(hass)
        self._config = config
        self._prefs = prefs
        self._cloud = cloud
        # Cached Alexa access token and the moment it stops being valid.
        self._token = None
        self._token_valid = None
        # Snapshot of entity prefs, used to diff when prefs change.
        self._cur_entity_prefs = prefs.alexa_entity_configs
        # Cancel callback for a pending debounced entity sync, if any.
        self._alexa_sync_unsub = None
        self._endpoint = None
        prefs.async_listen_updates(self._async_prefs_updated)
        hass.bus.async_listen(
            entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
            self._handle_entity_registry_updated,
        )

    @property
    def enabled(self):
        """Return if Alexa is enabled."""
        return self._prefs.alexa_enabled

    @property
    def supports_auth(self):
        """Return if config supports auth."""
        return True

    @property
    def should_report_state(self):
        """Return if states should be proactively reported."""
        return self._prefs.alexa_report_state

    @property
    def endpoint(self):
        """Endpoint for report state.

        Raises ValueError when async_get_access_token has not populated it.
        """
        if self._endpoint is None:
            raise ValueError("No endpoint available. Fetch access token first")
        return self._endpoint

    @property
    def entity_config(self):
        """Return entity config."""
        return self._config.get(CONF_ENTITY_CONFIG) or {}

    def should_expose(self, entity_id):
        """If an entity should be exposed."""
        if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            return False
        # A filter in configuration.yaml takes precedence over UI prefs.
        if not self._config[CONF_FILTER].empty_filter:
            return self._config[CONF_FILTER](entity_id)
        entity_configs = self._prefs.alexa_entity_configs
        entity_config = entity_configs.get(entity_id, {})
        return entity_config.get(PREF_SHOULD_EXPOSE, DEFAULT_SHOULD_EXPOSE)

    async def async_get_access_token(self):
        """Get an access token.

        Raises RequireRelink or alexa_errors.NoTokenAvailable on failure.
        """
        # Bug fix: reuse the cached token while it is still VALID, i.e.
        # while its valid-until timestamp lies in the future. The previous
        # comparison (`< utcnow()`) was inverted: it returned the cached
        # (expired) token after expiry and re-fetched on every call while
        # the token was still good.
        if self._token_valid is not None and self._token_valid > utcnow():
            return self._token
        resp = await cloud_api.async_alexa_access_token(self._cloud)
        body = await resp.json()
        if resp.status == 400:
            if body["reason"] in ("RefreshTokenNotFound", "UnknownRegion"):
                if self.should_report_state:
                    # Disable state reporting and tell the user to re-link.
                    await self._prefs.async_update(alexa_report_state=False)
                    self.hass.components.persistent_notification.async_create(
                        "There was an error reporting state to Alexa ({}). "
                        "Please re-link your Alexa skill via the Alexa app to "
                        "continue using it.".format(body["reason"]),
                        "Alexa state reporting disabled",
                        "cloud_alexa_report",
                    )
                raise RequireRelink
            raise alexa_errors.NoTokenAvailable
        self._token = body["access_token"]
        self._endpoint = body["event_endpoint"]
        self._token_valid = utcnow() + timedelta(seconds=body["expires_in"])
        return self._token

    async def _async_prefs_updated(self, prefs):
        """Handle updated preferences."""
        if self.should_report_state != self.is_reporting_states:
            if self.should_report_state:
                await self.async_enable_proactive_mode()
            else:
                await self.async_disable_proactive_mode()
            # State reporting is reported as a property on entities.
            # So when we change it, we need to sync all entities.
            await self.async_sync_entities()
            return
        # If entity prefs are the same or we have filter in config.yaml,
        # don't sync.
        if (
            self._cur_entity_prefs is prefs.alexa_entity_configs
            or not self._config[CONF_FILTER].empty_filter
        ):
            return
        if self._alexa_sync_unsub:
            self._alexa_sync_unsub()
        # Debounce: sync the diff after SYNC_DELAY seconds so rapid
        # consecutive changes result in a single sync.
        self._alexa_sync_unsub = async_call_later(
            self.hass, SYNC_DELAY, self._sync_prefs
        )

    async def _sync_prefs(self, _now):
        """Sync the updated preferences to Alexa."""
        self._alexa_sync_unsub = None
        old_prefs = self._cur_entity_prefs
        new_prefs = self._prefs.alexa_entity_configs
        seen = set()
        to_update = []
        to_remove = []
        # Diff old against new prefs: changed expose flags become adds or
        # removals.
        for entity_id, info in old_prefs.items():
            seen.add(entity_id)
            old_expose = info.get(PREF_SHOULD_EXPOSE)
            if entity_id in new_prefs:
                new_expose = new_prefs[entity_id].get(PREF_SHOULD_EXPOSE)
            else:
                new_expose = None
            if old_expose == new_expose:
                continue
            if new_expose:
                to_update.append(entity_id)
            else:
                to_remove.append(entity_id)
        # Now all the ones that are in new prefs but never were in old prefs
        for entity_id, info in new_prefs.items():
            if entity_id in seen:
                continue
            new_expose = info.get(PREF_SHOULD_EXPOSE)
            if new_expose is None:
                continue
            # Only test if we should expose. It can never be a remove action,
            # as it didn't exist in old prefs object.
            if new_expose:
                to_update.append(entity_id)
        # We only set the prefs when update is successful, that way we will
        # retry when next change comes in.
        if await self._sync_helper(to_update, to_remove):
            self._cur_entity_prefs = new_prefs

    async def async_sync_entities(self):
        """Sync all entities to Alexa."""
        # Remove any pending sync
        if self._alexa_sync_unsub:
            self._alexa_sync_unsub()
            self._alexa_sync_unsub = None
        to_update = []
        to_remove = []
        for entity in alexa_entities.async_get_entities(self.hass, self):
            if self.should_expose(entity.entity_id):
                to_update.append(entity.entity_id)
            else:
                to_remove.append(entity.entity_id)
        return await self._sync_helper(to_update, to_remove)

    async def _sync_helper(self, to_update, to_remove) -> bool:
        """Sync entities to Alexa.

        Return boolean if it was successful.
        """
        if not to_update and not to_remove:
            return True
        # Make sure it's valid.
        await self.async_get_access_token()
        tasks = []
        if to_update:
            tasks.append(
                alexa_state_report.async_send_add_or_update_message(
                    self.hass, self, to_update
                )
            )
        if to_remove:
            tasks.append(
                alexa_state_report.async_send_delete_message(self.hass, self, to_remove)
            )
        try:
            # Bound the whole sync so a hung request can't block forever.
            with async_timeout.timeout(10):
                await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
            return True
        except asyncio.TimeoutError:
            # Typo fix: "entitites" -> "entities".
            _LOGGER.warning("Timeout trying to sync entities to Alexa")
            return False
        except aiohttp.ClientError as err:
            _LOGGER.warning("Error trying to sync entities to Alexa: %s", err)
            return False

    async def _handle_entity_registry_updated(self, event):
        """Handle when entity registry updated."""
        if not self.enabled or not self._cloud.is_logged_in:
            return
        action = event.data["action"]
        entity_id = event.data["entity_id"]
        to_update = []
        to_remove = []
        if action == "create" and self.should_expose(entity_id):
            to_update.append(entity_id)
        elif action == "remove" and self.should_expose(entity_id):
            to_remove.append(entity_id)
        # Best effort: without a token there is nothing to sync to.
        try:
            await self._sync_helper(to_update, to_remove)
        except alexa_errors.NoTokenAvailable:
            pass
| {
"repo_name": "fbradyirl/home-assistant",
"path": "homeassistant/components/cloud/alexa_config.py",
"copies": "1",
"size": "8884",
"license": "apache-2.0",
"hash": 2052722925840038000,
"line_mean": 31.1884057971,
"line_max": 88,
"alpha_frac": 0.585434489,
"autogenerated": false,
"ratio": 4.159176029962547,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5244610518962547,
"avg_score": null,
"num_lines": null
} |
"""Alexa entity adapters."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
camera,
cover,
fan,
group,
image_processing,
input_boolean,
input_number,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_DESCRIPTION,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
__version__,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers import network
from homeassistant.helpers.entity import entity_sources
from homeassistant.util.decorator import Registry
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaCameraStreamController,
AlexaCapability,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaEqualizerController,
AlexaEventDetectionSensor,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaTimeHoldController,
AlexaToggleController,
)
from .const import CONF_DISPLAY_CATEGORIES
if TYPE_CHECKING:
from .config import AbstractConfig
_LOGGER = logging.getLogger(__name__)
# Maps entity domain -> AlexaEntity subclass, populated below via the
# @ENTITY_ADAPTERS.register(<domain>) decorators.
ENTITY_ADAPTERS = Registry()
# str.translate table that deletes characters Alexa rejects in names,
# descriptions and endpoint IDs.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    Each constant is the exact string Alexa expects in the
    "displayCategories" field of a discovery response.
    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """
    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"
    # Indicates a device that emits pleasant odors and masks unpleasant odors in interior spaces.
    AIR_FRESHENER = "AIR_FRESHENER"
    # Indicates a device that improves the quality of air in interior spaces.
    AIR_PURIFIER = "AIR_PURIFIER"
    # Indicates a smart device in an automobile, such as a dash camera.
    AUTO_ACCESSORY = "AUTO_ACCESSORY"
    # Indicates a security device with video or photo functionality.
    CAMERA = "CAMERA"
    # Indicates a religious holiday decoration that often contains lights.
    CHRISTMAS_TREE = "CHRISTMAS_TREE"
    # Indicates a device that makes coffee.
    COFFEE_MAKER = "COFFEE_MAKER"
    # Indicates a non-mobile computer, such as a desktop computer.
    COMPUTER = "COMPUTER"
    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"
    # Indicates a door.
    DOOR = "DOOR"
    # Indicates a doorbell.
    DOORBELL = "DOORBELL"
    # Indicates a window covering on the outside of a structure.
    EXTERIOR_BLIND = "EXTERIOR_BLIND"
    # Indicates a fan.
    FAN = "FAN"
    # Indicates a game console, such as Microsoft Xbox or Nintendo Switch
    GAME_CONSOLE = "GAME_CONSOLE"
    # Indicates a garage door.
    # Garage doors must implement the ModeController interface to open and close the door.
    GARAGE_DOOR = "GARAGE_DOOR"
    # Indicates a wearable device that transmits audio directly into the ear.
    HEADPHONES = "HEADPHONES"
    # Indicates a smart-home hub.
    HUB = "HUB"
    # Indicates a window covering on the inside of a structure.
    INTERIOR_BLIND = "INTERIOR_BLIND"
    # Indicates a laptop or other mobile computer.
    LAPTOP = "LAPTOP"
    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"
    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"
    # Indicates a mobile phone.
    MOBILE_PHONE = "MOBILE_PHONE"
    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"
    # Indicates a network-connected music system.
    MUSIC_SYSTEM = "MUSIC_SYSTEM"
    # Indicates a network router.
    NETWORK_HARDWARE = "NETWORK_HARDWARE"
    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"
    # Indicates an oven cooking appliance.
    OVEN = "OVEN"
    # Indicates a non-mobile phone, such as landline or an IP phone.
    PHONE = "PHONE"
    # Indicates a device that prints.
    PRINTER = "PRINTER"
    # Indicates a network router.
    ROUTER = "ROUTER"
    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"
    # Indicates a projector screen.
    SCREEN = "SCREEN"
    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"
    # Indicates a security system.
    SECURITY_SYSTEM = "SECURITY_SYSTEM"
    # Indicates an electric cooking device that sits on a countertop, cooks at low temperatures,
    # and is often shaped like a cooking pot.
    SLOW_COOKER = "SLOW_COOKER"
    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"
    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"
    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"
    # Indicates a streaming device such as Apple TV, Chromecast, or Roku.
    STREAMING_DEVICE = "STREAMING_DEVICE"
    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"
    # Indicates a tablet computer.
    TABLET = "TABLET"
    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"
    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"
    # Indicates the endpoint is a television.
    TV = "TV"
    # Indicates a vacuum cleaner.
    VACUUM_CLEANER = "VACUUM_CLEANER"
    # Indicates a network-connected wearable device, such as an Apple Watch, Fitbit, or Samsung Gear.
    WEARABLE = "WEARABLE"
def generate_alexa_id(entity_id: str) -> str:
    """Return the alexa ID for an entity ID."""
    # Strip the characters Alexa rejects, then swap the domain separator
    # for "#". "." is not in the translation table, so the order of the
    # two steps does not matter.
    sanitized = entity_id.translate(TRANSLATION_TABLE)
    return sanitized.replace(".", "#")
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.
    The API handlers should manipulate entities only through this interface.
    """
    def __init__(
        self, hass: HomeAssistant, config: AbstractConfig, entity: State
    ) -> None:
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        # Per-entity overrides from the user's configuration (name,
        # description, display categories); empty dict when not configured.
        self.entity_conf = config.entity_config.get(entity.entity_id, {})
    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id
    def friendly_name(self):
        """Return the Alexa API friendly name."""
        # Configured name wins over the state's own name; characters Alexa
        # rejects are stripped via TRANSLATION_TABLE.
        return self.entity_conf.get(CONF_NAME, self.entity.name).translate(
            TRANSLATION_TABLE
        )
    def description(self):
        """Return the Alexa API description."""
        description = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{description} via Home Assistant".translate(TRANSLATION_TABLE)
    def alexa_id(self):
        """Return the Alexa API entity id."""
        return generate_alexa_id(self.entity.entity_id)
    def display_categories(self):
        """Return a list of display categories."""
        # A user-configured category overrides the adapter's default.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            return [entity_conf[CONF_DISPLAY_CATEGORIES]]
        return self.default_display_categories()
    def default_display_categories(self):
        """Return a list of default display categories.
        This can be overridden by the user in the Home Assistant configuration.
        See also DisplayCategory.
        """
        raise NotImplementedError
    def get_interface(self, capability) -> AlexaCapability:
        """Return the given AlexaInterface.
        Raises _UnsupportedInterface.
        """
    def interfaces(self) -> list[AlexaCapability]:
        """Return a list of supported interfaces.
        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError
    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            if not interface.properties_proactively_reported():
                continue
            yield from interface.serialize_properties()
    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        result = {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
            "additionalAttributes": {
                "manufacturer": "Home Assistant",
                "model": self.entity.domain,
                "softwareVersion": __version__,
                "customIdentifier": f"{self.config.user_identifier()}-{self.entity_id}",
            },
        }
        locale = self.config.locale
        capabilities = []
        for i in self.interfaces():
            # Skip capabilities that do not support the configured locale.
            if locale not in i.supported_locales:
                continue
            try:
                capabilities.append(i.serialize_discovery())
            except Exception:  # pylint: disable=broad-except
                # One broken capability must not break discovery of the
                # whole entity; log it and continue with the rest.
                _LOGGER.exception(
                    "Error serializing %s discovery for %s", i.name(), self.entity
                )
        result["capabilities"] = capabilities
        return result
@callback
def async_get_entities(hass, config) -> list[AlexaEntity]:
    """Return all entities that are supported by Alexa."""
    entities = []
    for state in hass.states.async_all():
        # Blocked entities are never exposed; domains without a registered
        # adapter cannot be represented in Alexa terms.
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        if state.domain not in ENTITY_ADAPTERS:
            continue
        adapter = ENTITY_ADAPTERS[state.domain](hass, config, state)
        # An entity without any supported interface is not discoverable.
        if list(adapter.interfaces()):
            entities.append(adapter)
    return entities
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.
    The choice of last resort.
    """
    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Automations show up as activity triggers; everything else falls
        # back to the generic OTHER category.
        if self.entity.domain == automation.DOMAIN:
            category = DisplayCategory.ACTIVITY_TRIGGER
        else:
            category = DisplayCategory.OTHER
        return [category]
    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaPowerController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Adapter expressing switch entities in Alexa terms."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Outlets get their own category; any other switch is generic.
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        is_outlet = device_class == switch.DEVICE_CLASS_OUTLET
        return [DisplayCategory.SMARTPLUG if is_outlet else DisplayCategory.SWITCH]
    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaPowerController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Adapter expressing climate entities in Alexa terms."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]
    def interfaces(self):
        """Yield the supported interfaces."""
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        # A device that offers an "off" mode can also be turned on, so it
        # additionally gets a power controller.
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Adapter expressing cover entities in Alexa terms."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            category = DisplayCategory.GARAGE_DOOR
        elif device_class == cover.DEVICE_CLASS_DOOR:
            category = DisplayCategory.DOOR
        elif device_class in (
            cover.DEVICE_CLASS_BLIND,
            cover.DEVICE_CLASS_SHADE,
            cover.DEVICE_CLASS_CURTAIN,
        ):
            # Coverings mounted on the inside of the structure.
            category = DisplayCategory.INTERIOR_BLIND
        elif device_class in (
            cover.DEVICE_CLASS_WINDOW,
            cover.DEVICE_CLASS_AWNING,
            cover.DEVICE_CLASS_SHUTTER,
        ):
            # Coverings mounted on the outside of the structure.
            category = DisplayCategory.EXTERIOR_BLIND
        else:
            category = DisplayCategory.OTHER
        return [category]
    def interfaces(self):
        """Yield the supported interfaces."""
        entity = self.entity
        device_class = entity.attributes.get(ATTR_DEVICE_CLASS)
        # Garage doors and gates are driven through the mode/range
        # controllers only, so no power controller for them.
        if device_class not in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            yield AlexaPowerController(entity)
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        position_instance = f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
        if supported & cover.SUPPORT_SET_POSITION:
            # Precise positioning available: expose a range controller.
            yield AlexaRangeController(entity, instance=position_instance)
        elif supported & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            # Only open/close available: expose a mode controller instead.
            yield AlexaModeController(entity, instance=position_instance)
        if supported & cover.SUPPORT_SET_TILT_POSITION:
            yield AlexaRangeController(entity, instance=f"{cover.DOMAIN}.tilt")
        yield AlexaEndpointHealth(self.hass, entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Adapter expressing light entities in Alexa terms."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]
    def interfaces(self):
        """Yield the supported interfaces."""
        entity = self.entity
        yield AlexaPowerController(entity)
        # Capabilities follow what the light's color modes allow.
        modes = entity.attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        if light.brightness_supported(modes):
            yield AlexaBrightnessController(entity)
        if light.color_supported(modes):
            yield AlexaColorController(entity)
        if light.color_temp_supported(modes):
            yield AlexaColorTemperatureController(entity)
        yield AlexaEndpointHealth(self.hass, entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Class to represent Fan capabilities."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]
    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & fan.SUPPORT_SET_SPEED:
            # Speed is exposed in several forms for compatibility with
            # older Alexa skills/utterances.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            # The use of legacy speeds is deprecated in the schema, support will be removed after a quarter (2021.7)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        if supported & fan.SUPPORT_OSCILLATE:
            yield AlexaToggleController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}"
            )
        if supported & fan.SUPPORT_PRESET_MODE:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_PRESET_MODE}"
            )
        if supported & fan.SUPPORT_DIRECTION:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Adapter expressing lock entities in Alexa terms."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]
    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaLockController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Class to represent MediaPlayer capabilities."""
    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        return [DisplayCategory.TV]
    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Absolute volume control is preferred over step-only control.
        if supported & media_player.const.SUPPORT_VOLUME_SET:
            yield AlexaSpeaker(self.entity)
        elif supported & media_player.const.SUPPORT_VOLUME_STEP:
            yield AlexaStepSpeaker(self.entity)
        playback_features = (
            media_player.const.SUPPORT_PLAY
            | media_player.const.SUPPORT_PAUSE
            | media_player.const.SUPPORT_STOP
            | media_player.const.SUPPORT_NEXT_TRACK
            | media_player.const.SUPPORT_PREVIOUS_TRACK
        )
        # Any single playback feature enables the playback interfaces.
        if supported & playback_features:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)
        if supported & media_player.const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)
        if supported & media_player.SUPPORT_SELECT_SOURCE:
            # Only expose the input controller when at least one source
            # maps to a valid Alexa input name.
            inputs = AlexaInputController.get_valid_inputs(
                self.entity.attributes.get(
                    media_player.const.ATTR_INPUT_SOURCE_LIST, []
                )
            )
            if len(inputs) > 0:
                yield AlexaInputController(self.entity)
        if supported & media_player.const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)
        # AlexaEqualizerController is disabled for denonavr
        # since it blocks alexa from discovering any devices.
        domain = entity_sources(self.hass).get(self.entity_id, {}).get("domain")
        if (
            supported & media_player.const.SUPPORT_SELECT_SOUND_MODE
            and domain != "denonavr"
        ):
            inputs = AlexaEqualizerController.get_valid_inputs(
                self.entity.attributes.get(media_player.const.ATTR_SOUND_MODE_LIST, [])
            )
            if len(inputs) > 0:
                yield AlexaEqualizerController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Adapter expressing scene entities in Alexa terms."""
    def description(self):
        """Return the Alexa API description."""
        base = AlexaEntity.description(self)
        # Make sure the word "scene" appears somewhere so users can tell
        # scenes apart from regular entities in the Alexa app.
        if "scene" in base.casefold():
            return base
        return f"{base} (Scene)"
    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]
    def interfaces(self):
        """Return the supported interfaces."""
        # Scenes can only be activated, never deactivated.
        return [
            AlexaSceneController(self.entity, supports_deactivation=False),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Expose a Home Assistant script as an Alexa activity endpoint."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Scripts support deactivation (cancelling a running script).
        activity_controller = AlexaSceneController(
            self.entity, supports_deactivation=True
        )
        return [activity_controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Expose a Home Assistant sensor to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Only temperature sensors are currently exposed; every other
        # kind of sensor is ignored, so this is the only category.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        # Only sensors reporting a temperature unit get the sensor interface.
        if unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
            yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Internal sensor-type tags returned by get_type().
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"
    TYPE_PRESENCE = "presence"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        sensor_type = self.get_type()
        # FIX: compare strings with == rather than `is`; identity of equal
        # strings is an interpreter implementation detail, not a guarantee.
        if sensor_type == self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type == self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]
        if sensor_type == self.TYPE_PRESENCE:
            return [DisplayCategory.CAMERA]
        # Unrecognized device classes yield no category.
        return None

    def interfaces(self):
        """Yield the supported interfaces."""
        sensor_type = self.get_type()
        if sensor_type == self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_PRESENCE:
            yield AlexaEventDetectionSensor(self.hass, self.entity)
        # Yield additional interfaces based on the display category the user
        # configured, e.g. exposing a generic binary_sensor as a doorbell.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            display_category = entity_conf[CONF_DISPLAY_CATEGORIES]
            if display_category == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
            elif display_category == DisplayCategory.CONTACT_SENSOR:
                yield AlexaContactSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.MOTION_SENSOR:
                yield AlexaMotionSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.CAMERA:
                yield AlexaEventDetectionSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor, or None if unrecognized."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        ):
            return self.TYPE_CONTACT
        if device_class == binary_sensor.DEVICE_CLASS_MOTION:
            return self.TYPE_MOTION
        if device_class == binary_sensor.DEVICE_CLASS_PRESENCE:
            return self.TYPE_PRESENCE
        return None
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Expose an alarm control panel as an Alexa security panel."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Panels that require a code to arm expose no interfaces at all,
        # which keeps them out of discovery entirely.
        code_arm_required = self.entity.attributes.get("code_arm_required")
        if not code_arm_required:
            yield AlexaSecurityPanelController(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(image_processing.DOMAIN)
class ImageProcessingCapabilities(AlexaEntity):
    """Expose an image_processing entity as an Alexa camera event source."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Image processing entities report detection events to Alexa.
        yield from (
            AlexaEventDetectionSensor(self.hass, self.entity),
            AlexaEndpointHealth(self.hass, self.entity),
            Alexa(self.hass),
        )
@ENTITY_ADAPTERS.register(input_number.DOMAIN)
class InputNumberCapabilities(AlexaEntity):
    """Expose an input_number as an Alexa range endpoint."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        # The numeric value is exposed through a range controller whose
        # instance name is "input_number.value".
        instance_name = f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}"
        yield AlexaRangeController(self.entity, instance=instance_name)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(timer.DOMAIN)
class TimerCapabilities(AlexaEntity):
    """Class to represent Timer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaTimeHoldController(self.entity, allow_remote_resume=True)
        yield AlexaPowerController(self.entity)
        # FIX: Alexa() takes hass, as in every other adapter in this module;
        # previously the entity was passed by mistake.
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(vacuum.DOMAIN)
class VacuumCapabilities(AlexaEntity):
    """Expose a vacuum cleaner to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.VACUUM_CLEANER]

    def interfaces(self):
        """Yield the supported interfaces."""
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Power control needs some way to both start the vacuum
        # (turn_on or start) and stop it (turn_off or return_home).
        can_start = supported & (vacuum.SUPPORT_TURN_ON | vacuum.SUPPORT_START)
        can_stop = supported & (
            vacuum.SUPPORT_TURN_OFF | vacuum.SUPPORT_RETURN_HOME
        )
        if can_start and can_stop:
            yield AlexaPowerController(self.entity)
        if supported & vacuum.SUPPORT_FAN_SPEED:
            yield AlexaRangeController(
                self.entity, instance=f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}"
            )
        if supported & vacuum.SUPPORT_PAUSE:
            # Remote resume is only offered when the vacuum supports start.
            yield AlexaTimeHoldController(
                self.entity,
                allow_remote_resume=bool(supported & vacuum.SUPPORT_START),
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(camera.DOMAIN)
class CameraCapabilities(AlexaEntity):
    """Class to represent Camera capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Streaming is only exposed when the environment supports it
        # (stream component loaded, public HTTPS URL available).
        if self._check_requirements():
            supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            if supported & camera.SUPPORT_STREAM:
                yield AlexaCameraStreamController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def _check_requirements(self):
        """Return True when camera-stream prerequisites are met.

        Requires the stream component to be loaded and an external HTTPS
        URL on the standard port; otherwise logs why and returns False.
        """
        if "stream" not in self.hass.config.components:
            _LOGGER.debug(
                "%s requires stream component for AlexaCameraStreamController",
                self.entity_id,
            )
            return False
        try:
            # Raises NoURLAvailableError unless an external, hostname-based
            # HTTPS URL on the default port is configured.
            network.get_url(
                self.hass,
                allow_internal=False,
                allow_ip=False,
                require_ssl=True,
                require_standard_port=True,
            )
        except network.NoURLAvailableError:
            _LOGGER.debug(
                "%s requires HTTPS for AlexaCameraStreamController", self.entity_id
            )
            return False
        return True
| {
"repo_name": "kennedyshead/home-assistant",
"path": "homeassistant/components/alexa/entities.py",
"copies": "1",
"size": "30471",
"license": "apache-2.0",
"hash": 8125997992793968000,
"line_mean": 32.89432703,
"line_max": 116,
"alpha_frac": 0.6570509665,
"autogenerated": false,
"ratio": 4.214522821576764,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5371573788076763,
"avg_score": null,
"num_lines": null
} |
"""Alexa entity adapters."""
from typing import List
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
cover,
fan,
group,
image_processing,
input_boolean,
input_number,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.util.decorator import Registry
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaEqualizerController,
AlexaEventDetectionSensor,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaTimeHoldController,
AlexaToggleController,
)
from .const import CONF_DESCRIPTION, CONF_DISPLAY_CATEGORIES
# Registry mapping entity domains to their AlexaEntity adapter classes.
ENTITY_ADAPTERS = Registry()

# str.translate table that strips characters Alexa rejects in names,
# descriptions and endpoint ids.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    The values are sent verbatim in discovery responses and must match the
    identifiers defined by the Alexa Smart Home API.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """

    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"

    # Indicates media devices with video or photo capabilities.
    CAMERA = "CAMERA"

    # Indicates a non-mobile computer, such as a desktop computer.
    COMPUTER = "COMPUTER"

    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"

    # Indicates a door.
    DOOR = "DOOR"

    # Indicates a doorbell.
    DOORBELL = "DOORBELL"

    # Indicates a window covering on the outside of a structure.
    EXTERIOR_BLIND = "EXTERIOR_BLIND"

    # Indicates a fan.
    FAN = "FAN"

    # Indicates a game console, such as Microsoft Xbox or Nintendo Switch
    GAME_CONSOLE = "GAME_CONSOLE"

    # Indicates a garage door. Garage doors must implement the ModeController interface to open and close the door.
    GARAGE_DOOR = "GARAGE_DOOR"

    # Indicates a window covering on the inside of a structure.
    INTERIOR_BLIND = "INTERIOR_BLIND"

    # Indicates a laptop or other mobile computer.
    LAPTOP = "LAPTOP"

    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"

    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"

    # Indicates a mobile phone.
    MOBILE_PHONE = "MOBILE_PHONE"

    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"

    # Indicates a network-connected music system.
    MUSIC_SYSTEM = "MUSIC_SYSTEM"

    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"

    # Indicates a network router.
    NETWORK_HARDWARE = "NETWORK_HARDWARE"

    # Indicates an oven cooking appliance.
    OVEN = "OVEN"

    # Indicates a non-mobile phone, such as landline or an IP phone.
    PHONE = "PHONE"

    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"

    # Indicates a projector screen.
    SCREEN = "SCREEN"

    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"

    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"

    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"

    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"

    # Indicates a streaming device such as Apple TV, Chromecast, or Roku.
    STREAMING_DEVICE = "STREAMING_DEVICE"

    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"

    # Indicates a tablet computer.
    TABLET = "TABLET"

    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"

    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"

    # Indicates the endpoint is a television.
    TV = "TV"

    # Indicates a network-connected wearable device, such as an Apple Watch, Fitbit, or Samsung Gear.
    WEARABLE = "WEARABLE"
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.

    The API handlers should manipulate entities only through this interface.
    """

    def __init__(self, hass, config, entity):
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        # Per-entity user configuration (name/description/category overrides).
        self.entity_conf = config.entity_config.get(entity.entity_id, {})

    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id

    def friendly_name(self):
        """Return the Alexa API friendly name."""
        # Characters Alexa rejects are stripped via TRANSLATION_TABLE.
        return self.entity_conf.get(CONF_NAME, self.entity.name).translate(
            TRANSLATION_TABLE
        )

    def description(self):
        """Return the Alexa API description."""
        description = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{description} via Home Assistant".translate(TRANSLATION_TABLE)

    def alexa_id(self):
        """Return the Alexa API entity id."""
        # Alexa endpoint ids may not contain dots, so "." becomes "#".
        return self.entity.entity_id.replace(".", "#").translate(TRANSLATION_TABLE)

    def display_categories(self):
        """Return a list of display categories."""
        # A user-configured category overrides the domain default.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            return [entity_conf[CONF_DISPLAY_CATEGORIES]]
        return self.default_display_categories()

    def default_display_categories(self):
        """Return a list of default display categories.

        This can be overridden by the user in the Home Assistant configuration.

        See also DisplayCategory.
        """
        raise NotImplementedError

    def get_interface(self, capability):
        """Return the given AlexaInterface.

        Raises _UnsupportedInterface.
        """
        pass

    def interfaces(self):
        """Return a list of supported interfaces.

        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError

    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            # Skip interfaces that do not proactively report properties.
            if not interface.properties_proactively_reported():
                continue
            for prop in interface.serialize_properties():
                yield prop

    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        result = {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
        }

        locale = self.config.locale
        capabilities = []

        # Only include interfaces that support the configured locale.
        for i in self.interfaces():
            if locale in i.supported_locales:
                capabilities.append(i.serialize_discovery())

        result["capabilities"] = capabilities
        return result
@callback
def async_get_entities(hass, config) -> List[AlexaEntity]:
    """Return all entities that are supported by Alexa."""
    entities = []
    for state in hass.states.async_all():
        # Never expose entities on the cloud deny list.
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        # Skip domains without a registered adapter.
        adapter_cls = ENTITY_ADAPTERS.get(state.domain)
        if adapter_cls is None:
            continue
        alexa_entity = adapter_cls(hass, config, state)
        # Entities exposing no interfaces are not discoverable.
        if list(alexa_entity.interfaces()):
            entities.append(alexa_entity)
    return entities
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.

    The choice of last resort.
    """

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Everything handled by this adapter is a simple on/off device.
        power = AlexaPowerController(self.entity)
        health = AlexaEndpointHealth(self.hass, self.entity)
        return [power, health, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Expose a switch to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Outlets are presented as smart plugs, everything else as switches.
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == switch.DEVICE_CLASS_OUTLET:
            return [DisplayCategory.SMARTPLUG]
        return [DisplayCategory.SWITCH]

    def interfaces(self):
        """Return the supported interfaces."""
        power = AlexaPowerController(self.entity)
        health = AlexaEndpointHealth(self.hass, self.entity)
        return [power, health, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Expose a climate device as an Alexa thermostat."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]

    def interfaces(self):
        """Yield the supported interfaces."""
        # A power controller only makes sense when the device can actually
        # be switched off, i.e. HVAC_MODE_OFF is among its modes.
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Expose a cover to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Map the Home Assistant device class onto the closest Alexa category.
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == cover.DEVICE_CLASS_GARAGE:
            return [DisplayCategory.GARAGE_DOOR]
        if device_class == cover.DEVICE_CLASS_DOOR:
            return [DisplayCategory.DOOR]
        interior_classes = (
            cover.DEVICE_CLASS_BLIND,
            cover.DEVICE_CLASS_SHADE,
            cover.DEVICE_CLASS_CURTAIN,
        )
        if device_class in interior_classes:
            return [DisplayCategory.INTERIOR_BLIND]
        exterior_classes = (
            cover.DEVICE_CLASS_WINDOW,
            cover.DEVICE_CLASS_AWNING,
            cover.DEVICE_CLASS_SHUTTER,
        )
        if device_class in exterior_classes:
            return [DisplayCategory.EXTERIOR_BLIND]
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        position_instance = f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
        if supported & cover.SUPPORT_SET_POSITION:
            # Absolute positioning is exposed as a range controller.
            yield AlexaRangeController(self.entity, instance=position_instance)
        elif supported & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            # Open/close only is exposed as a mode controller instead.
            yield AlexaModeController(self.entity, instance=position_instance)
        if supported & cover.SUPPORT_SET_TILT_POSITION:
            yield AlexaRangeController(self.entity, instance=f"{cover.DOMAIN}.tilt")
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Expose a light to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # (feature flag, controller class) pairs, yielded when supported.
        feature_controllers = (
            (light.SUPPORT_BRIGHTNESS, AlexaBrightnessController),
            (light.SUPPORT_COLOR, AlexaColorController),
            (light.SUPPORT_COLOR_TEMP, AlexaColorTemperatureController),
        )
        for feature, controller_cls in feature_controllers:
            if supported & feature:
                yield controller_cls(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Expose a fan to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & fan.SUPPORT_SET_SPEED:
            # Speed is exposed three ways for broad utterance coverage.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        # (feature flag, controller class, instance attribute) triples.
        instance_controllers = (
            (fan.SUPPORT_OSCILLATE, AlexaToggleController, fan.ATTR_OSCILLATING),
            (fan.SUPPORT_DIRECTION, AlexaModeController, fan.ATTR_DIRECTION),
        )
        for feature, controller_cls, attr in instance_controllers:
            if supported & feature:
                yield controller_cls(self.entity, instance=f"{fan.DOMAIN}.{attr}")
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Expose a lock to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]

    def interfaces(self):
        """Return the supported interfaces."""
        lock_controller = AlexaLockController(self.entity)
        health = AlexaEndpointHealth(self.hass, self.entity)
        return [lock_controller, health, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Expose a media player to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Speakers are presented as speakers; everything else as a TV.
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        return [DisplayCategory.TV]

    def interfaces(self):
        """Yield the supported interfaces."""
        mp_const = media_player.const
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & mp_const.SUPPORT_VOLUME_SET:
            # Absolute volume control.
            yield AlexaSpeaker(self.entity)
        step_volume_features = (
            mp_const.SUPPORT_VOLUME_MUTE | mp_const.SUPPORT_VOLUME_STEP
        )
        if supported & step_volume_features:
            # Relative (step) volume and/or mute control.
            yield AlexaStepSpeaker(self.entity)
        playback_features = (
            mp_const.SUPPORT_PLAY
            | mp_const.SUPPORT_PAUSE
            | mp_const.SUPPORT_STOP
            | mp_const.SUPPORT_NEXT_TRACK
            | mp_const.SUPPORT_PREVIOUS_TRACK
        )
        if supported & playback_features:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)
        if supported & mp_const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)
        if supported & media_player.SUPPORT_SELECT_SOURCE:
            yield AlexaInputController(self.entity)
        if supported & mp_const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)
        if supported & mp_const.SUPPORT_SELECT_SOUND_MODE:
            yield AlexaEqualizerController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Expose a Home Assistant scene as an Alexa scene endpoint."""

    def description(self):
        """Return the Alexa API description, tagged as a scene when needed."""
        base_description = AlexaEntity.description(self)
        # Only append "(Scene)" when the word is not already present
        # (case-insensitive via casefold).
        if "scene" in base_description.casefold():
            return base_description
        return f"{base_description} (Scene)"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Scenes can only be activated, never deactivated.
        scene_controller = AlexaSceneController(
            self.entity, supports_deactivation=False
        )
        return [scene_controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Expose a Home Assistant script as an Alexa activity endpoint."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Deactivation maps to cancelling the running script, which is only
        # offered when the script reports it can be cancelled.
        can_cancel = bool(self.entity.attributes.get("can_cancel"))
        activity_controller = AlexaSceneController(
            self.entity, supports_deactivation=can_cancel
        )
        return [activity_controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Expose a Home Assistant sensor to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Only temperature sensors are currently exposed; every other
        # kind of sensor is ignored, so this is the only category.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        # Only sensors reporting a temperature unit get the sensor interface.
        if unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
            yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Internal sensor-type tags returned by get_type().
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"
    TYPE_PRESENCE = "presence"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        sensor_type = self.get_type()
        # FIX: compare strings with == rather than `is`; identity of equal
        # strings is an interpreter implementation detail, not a guarantee.
        if sensor_type == self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type == self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]
        if sensor_type == self.TYPE_PRESENCE:
            return [DisplayCategory.CAMERA]
        # Unrecognized device classes yield no category.
        return None

    def interfaces(self):
        """Yield the supported interfaces."""
        sensor_type = self.get_type()
        if sensor_type == self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_PRESENCE:
            yield AlexaEventDetectionSensor(self.hass, self.entity)
        # Yield additional interfaces based on the display category the user
        # configured, e.g. exposing a generic binary_sensor as a doorbell.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            display_category = entity_conf[CONF_DISPLAY_CATEGORIES]
            if display_category == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
            elif display_category == DisplayCategory.CONTACT_SENSOR:
                yield AlexaContactSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.MOTION_SENSOR:
                yield AlexaMotionSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.CAMERA:
                yield AlexaEventDetectionSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor, or None if unrecognized."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        ):
            return self.TYPE_CONTACT
        if device_class == binary_sensor.DEVICE_CLASS_MOTION:
            return self.TYPE_MOTION
        if device_class == binary_sensor.DEVICE_CLASS_PRESENCE:
            return self.TYPE_PRESENCE
        return None
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Expose an alarm control panel as an Alexa security panel."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Panels that require a code to arm expose no interfaces at all,
        # which keeps them out of discovery entirely.
        code_arm_required = self.entity.attributes.get("code_arm_required")
        if not code_arm_required:
            yield AlexaSecurityPanelController(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(image_processing.DOMAIN)
class ImageProcessingCapabilities(AlexaEntity):
    """Expose an image_processing entity as an Alexa camera event source."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Image processing entities report detection events to Alexa.
        yield from (
            AlexaEventDetectionSensor(self.hass, self.entity),
            AlexaEndpointHealth(self.hass, self.entity),
            Alexa(self.hass),
        )
@ENTITY_ADAPTERS.register(input_number.DOMAIN)
class InputNumberCapabilities(AlexaEntity):
    """Expose an input_number as an Alexa range endpoint."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        # The numeric value is exposed through a range controller whose
        # instance name is "input_number.value".
        instance_name = f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}"
        yield AlexaRangeController(self.entity, instance=instance_name)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(timer.DOMAIN)
class TimerCapabilities(AlexaEntity):
    """Class to represent Timer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaTimeHoldController(self.entity, allow_remote_resume=True)
        # FIX: Alexa() takes hass, as in every other adapter in this module;
        # previously the entity was passed by mistake.
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(vacuum.DOMAIN)
class VacuumCapabilities(AlexaEntity):
    """Expose a vacuum cleaner to Alexa."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Power control requires both explicit turn-on and turn-off support.
        if supported & vacuum.SUPPORT_TURN_ON and supported & vacuum.SUPPORT_TURN_OFF:
            yield AlexaPowerController(self.entity)
        if supported & vacuum.SUPPORT_FAN_SPEED:
            yield AlexaRangeController(
                self.entity, instance=f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}"
            )
        if supported & vacuum.SUPPORT_PAUSE:
            # Remote resume is only offered when the vacuum supports start.
            yield AlexaTimeHoldController(
                self.entity,
                allow_remote_resume=bool(supported & vacuum.SUPPORT_START),
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/alexa/entities.py",
"copies": "1",
"size": "25487",
"license": "apache-2.0",
"hash": -5503343197778340000,
"line_mean": 32.6684280053,
"line_max": 115,
"alpha_frac": 0.6640640326,
"autogenerated": false,
"ratio": 4.2274008956709235,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5391464928270924,
"avg_score": null,
"num_lines": null
} |
"""Alexa entity adapters."""
from typing import List
from homeassistant.core import callback
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.util.decorator import Registry
from homeassistant.components.climate import const as climate
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
)
from .const import CONF_DESCRIPTION, CONF_DISPLAY_CATEGORIES
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaToggleController,
)
# Registry mapping entity domains to their AlexaEntity adapter classes.
ENTITY_ADAPTERS = Registry()

# str.translate table that strips characters Alexa rejects in names,
# descriptions and endpoint ids.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    The values are sent verbatim in discovery responses and must match the
    identifiers defined by the Alexa Smart Home API.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """

    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"

    # Indicates media devices with video or photo capabilities.
    CAMERA = "CAMERA"

    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"

    # Indicates a door.
    DOOR = "DOOR"

    # Indicates a doorbell.
    DOORBELL = "DOORBELL"

    # Indicates a fan.
    FAN = "FAN"

    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"

    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"

    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"

    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"

    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"

    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"

    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"

    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"

    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"

    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"

    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"

    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"

    # Indicates the endpoint is a television.
    TV = "TV"
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.

    The API handlers should manipulate entities only through this interface.
    """

    def __init__(self, hass, config, entity):
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        # Per-entity user overrides (name, description, display categories)
        # from the Alexa configuration, if any.
        self.entity_conf = config.entity_config.get(entity.entity_id, {})

    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id

    def friendly_name(self):
        """Return the Alexa API friendly name."""
        # TRANSLATION_TABLE deletes characters not accepted in Alexa names.
        return self.entity_conf.get(CONF_NAME, self.entity.name).translate(
            TRANSLATION_TABLE
        )

    def description(self):
        """Return the Alexa API description."""
        description = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{description} via Home Assistant".translate(TRANSLATION_TABLE)

    def alexa_id(self):
        """Return the Alexa API entity id."""
        # '.' is replaced with '#' — presumably dots are not valid in Alexa
        # endpoint IDs; verify against the Alexa discovery spec.
        return self.entity.entity_id.replace(".", "#").translate(TRANSLATION_TABLE)

    def display_categories(self):
        """Return a list of display categories."""
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        # A user-configured category overrides the adapter default.
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            return [entity_conf[CONF_DISPLAY_CATEGORIES]]
        return self.default_display_categories()

    def default_display_categories(self):
        """Return a list of default display categories.

        This can be overridden by the user in the Home Assistant configuration.

        See also DisplayCategory.
        """
        raise NotImplementedError

    def get_interface(self, capability):
        """Return the given AlexaInterface.

        Raises _UnsupportedInterface.
        """
        # Intentionally a no-op default; subclasses may override.

    def interfaces(self):
        """Return a list of supported interfaces.

        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError

    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            # Flatten every interface's properties into a single stream.
            yield from interface.serialize_properties()

    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        return {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
            "capabilities": [i.serialize_discovery() for i in self.interfaces()],
        }
@callback
def async_get_entities(hass, config) -> List[AlexaEntity]:
    """Return all entities that are supported by Alexa."""
    supported = []
    for state in hass.states.async_all():
        # Some entities must never be exposed to the cloud.
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        # Skip domains without a registered Alexa adapter.
        if state.domain not in ENTITY_ADAPTERS:
            continue
        adapted = ENTITY_ADAPTERS[state.domain](hass, config, state)
        # An entity with no interfaces cannot be discovered.
        if any(True for _ in adapted.interfaces()):
            supported.append(adapted)
    return supported
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.

    The choice of last resort.
    """

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        parts = [AlexaPowerController(entity)]
        parts.append(AlexaEndpointHealth(self.hass, entity))
        parts.append(Alexa(self.hass))
        return parts
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Class to represent Switch capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Outlets get the SMARTPLUG category; everything else is SWITCH.
        is_outlet = (
            self.entity.attributes.get(ATTR_DEVICE_CLASS)
            == switch.DEVICE_CLASS_OUTLET
        )
        return [DisplayCategory.SMARTPLUG if is_outlet else DisplayCategory.SWITCH]

    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaPowerController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Class to represent Climate capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]

    def interfaces(self):
        """Yield the supported interfaces."""
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        # Expose on/off control only when the device has an "off" HVAC mode.
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Class to represent Cover capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        door_classes = (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_DOOR)
        if self.entity.attributes.get(ATTR_DEVICE_CLASS) in door_classes:
            return [DisplayCategory.DOOR]
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & cover.SUPPORT_SET_POSITION:
            yield AlexaPercentageController(self.entity)
        if features & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            yield AlexaModeController(
                self.entity, instance=f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Class to represent Light capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Map each optional light feature to its Alexa controller.
        if features & light.SUPPORT_BRIGHTNESS:
            yield AlexaBrightnessController(self.entity)
        if features & light.SUPPORT_COLOR:
            yield AlexaColorController(self.entity)
        if features & light.SUPPORT_COLOR_TEMP:
            yield AlexaColorTemperatureController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Class to represent Fan capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & fan.SUPPORT_SET_SPEED:
            # Speed is exposed three ways: percentage, power level and range.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        if features & fan.SUPPORT_OSCILLATE:
            yield AlexaToggleController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}"
            )
        if features & fan.SUPPORT_DIRECTION:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Class to represent Lock capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]

    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaLockController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Class to represent MediaPlayer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        # Every other media player device class defaults to TV.
        return [DisplayCategory.TV]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & media_player.const.SUPPORT_VOLUME_SET:
            yield AlexaSpeaker(self.entity)
        # NOTE: not an elif — a player supporting both absolute volume and
        # mute/step volume exposes AlexaSpeaker AND AlexaStepSpeaker.
        step_volume_features = (
            media_player.const.SUPPORT_VOLUME_MUTE
            | media_player.const.SUPPORT_VOLUME_STEP
        )
        if supported & step_volume_features:
            yield AlexaStepSpeaker(self.entity)
        # Any single playback feature enables both playback control and
        # playback state reporting.
        playback_features = (
            media_player.const.SUPPORT_PLAY
            | media_player.const.SUPPORT_PAUSE
            | media_player.const.SUPPORT_STOP
            | media_player.const.SUPPORT_NEXT_TRACK
            | media_player.const.SUPPORT_PREVIOUS_TRACK
        )
        if supported & playback_features:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)
        if supported & media_player.const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)
        if supported & media_player.SUPPORT_SELECT_SOURCE:
            yield AlexaInputController(self.entity)
        if supported & media_player.const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Class to represent Scene capabilities."""

    def description(self):
        """Return the Alexa API description."""
        base = AlexaEntity.description(self)
        # Append a marker only when "scene" is not already mentioned.
        if "scene" in base.casefold():
            return base
        return f"{base} (Scene)"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        controller = AlexaSceneController(self.entity, supports_deactivation=False)
        return [controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Class to represent Script capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Deactivation is offered only when the script reports can_cancel.
        deactivatable = bool(self.entity.attributes.get("can_cancel"))
        controller = AlexaSceneController(
            self.entity, supports_deactivation=deactivatable
        )
        return [controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Class to represent Sensor capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Although there are other kinds of sensors, all but temperature
        # sensors are currently ignored.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        # Only sensors reporting a temperature unit get a temperature interface.
        if unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
            yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Internal sensor-type tags returned by get_type().
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"

    def default_display_categories(self):
        """Return the display categories for this entity, or None if unknown."""
        sensor_type = self.get_type()
        # Use == instead of `is`: identity of equal strings is a CPython
        # interning detail and must not be relied on for comparisons.
        if sensor_type == self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type == self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        sensor_type = self.get_type()
        if sensor_type == self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        # A user-configured DOORBELL display category additionally exposes
        # the doorbell event source.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            if entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor, or None if unrecognized."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in ("door", "garage_door", "opening", "window"):
            return self.TYPE_CONTACT
        if device_class == "motion":
            return self.TYPE_MOTION
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Class to represent Alarm capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Expose the security panel controller only when arming requires
        # no code.
        code_required = self.entity.attributes.get("code_arm_required")
        if not code_required:
            yield AlexaSecurityPanelController(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
| {
"repo_name": "joopert/home-assistant",
"path": "homeassistant/components/alexa/entities.py",
"copies": "1",
"size": "19079",
"license": "apache-2.0",
"hash": 4173983716888928000,
"line_mean": 32.3548951049,
"line_max": 89,
"alpha_frac": 0.6653388542,
"autogenerated": false,
"ratio": 4.268232662192394,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5433571516392394,
"avg_score": null,
"num_lines": null
} |
"""Alexa entity adapters."""
import logging
from typing import List
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
camera,
cover,
fan,
group,
image_processing,
input_boolean,
input_number,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.helpers import network
from homeassistant.util.decorator import Registry
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaCameraStreamController,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaEqualizerController,
AlexaEventDetectionSensor,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaTimeHoldController,
AlexaToggleController,
)
from .const import CONF_DESCRIPTION, CONF_DISPLAY_CATEGORIES
_LOGGER = logging.getLogger(__name__)
# Registry mapping a Home Assistant domain to its Alexa adapter class.
ENTITY_ADAPTERS = Registry()
# Deletion table for str.translate: maps each listed character to None so it
# is stripped — presumably characters the Alexa API rejects in names/IDs.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """

    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"
    # Indicates media devices with video or photo capabilities.
    CAMERA = "CAMERA"
    # Indicates a non-mobile computer, such as a desktop computer.
    COMPUTER = "COMPUTER"
    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"
    # Indicates a door.
    DOOR = "DOOR"
    # Indicates a doorbell.
    DOORBELL = "DOORBELL"
    # Indicates a window covering on the outside of a structure.
    EXTERIOR_BLIND = "EXTERIOR_BLIND"
    # Indicates a fan.
    FAN = "FAN"
    # Indicates a game console, such as Microsoft Xbox or Nintendo Switch
    GAME_CONSOLE = "GAME_CONSOLE"
    # Indicates a garage door. Garage doors must implement the ModeController interface to open and close the door.
    GARAGE_DOOR = "GARAGE_DOOR"
    # Indicates a window covering on the inside of a structure.
    INTERIOR_BLIND = "INTERIOR_BLIND"
    # Indicates a laptop or other mobile computer.
    LAPTOP = "LAPTOP"
    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"
    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"
    # Indicates a mobile phone.
    MOBILE_PHONE = "MOBILE_PHONE"
    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"
    # Indicates a network-connected music system.
    MUSIC_SYSTEM = "MUSIC_SYSTEM"
    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"
    # Indicates a network router.
    NETWORK_HARDWARE = "NETWORK_HARDWARE"
    # Indicates an oven cooking appliance.
    OVEN = "OVEN"
    # Indicates a non-mobile phone, such as landline or an IP phone.
    PHONE = "PHONE"
    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"
    # Indicates a projector screen.
    SCREEN = "SCREEN"
    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"
    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"
    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"
    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"
    # Indicates a streaming device such as Apple TV, Chromecast, or Roku.
    STREAMING_DEVICE = "STREAMING_DEVICE"
    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"
    # Indicates a tablet computer.
    TABLET = "TABLET"
    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"
    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"
    # Indicates the endpoint is a television.
    TV = "TV"
    # Indicates a network-connected wearable device, such as an Apple Watch, Fitbit, or Samsung Gear.
    WEARABLE = "WEARABLE"
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.

    The API handlers should manipulate entities only through this interface.
    """

    def __init__(self, hass, config, entity):
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        # Per-entity user overrides (name, description, display categories)
        # from the Alexa configuration, if any.
        self.entity_conf = config.entity_config.get(entity.entity_id, {})

    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id

    def friendly_name(self):
        """Return the Alexa API friendly name."""
        # TRANSLATION_TABLE deletes characters not accepted in Alexa names.
        return self.entity_conf.get(CONF_NAME, self.entity.name).translate(
            TRANSLATION_TABLE
        )

    def description(self):
        """Return the Alexa API description."""
        description = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{description} via Home Assistant".translate(TRANSLATION_TABLE)

    def alexa_id(self):
        """Return the Alexa API entity id."""
        # '.' is replaced with '#' — presumably dots are not valid in Alexa
        # endpoint IDs; verify against the Alexa discovery spec.
        return self.entity.entity_id.replace(".", "#").translate(TRANSLATION_TABLE)

    def display_categories(self):
        """Return a list of display categories."""
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        # A user-configured category overrides the adapter default.
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            return [entity_conf[CONF_DISPLAY_CATEGORIES]]
        return self.default_display_categories()

    def default_display_categories(self):
        """Return a list of default display categories.

        This can be overridden by the user in the Home Assistant configuration.

        See also DisplayCategory.
        """
        raise NotImplementedError

    def get_interface(self, capability):
        """Return the given AlexaInterface.

        Raises _UnsupportedInterface.
        """

    def interfaces(self):
        """Return a list of supported interfaces.

        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError

    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            # Skip interfaces whose properties are not proactively reported.
            if not interface.properties_proactively_reported():
                continue
            yield from interface.serialize_properties()

    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        result = {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
        }
        locale = self.config.locale
        # Advertise only interfaces that support the configured locale.
        capabilities = [
            i.serialize_discovery()
            for i in self.interfaces()
            if locale in i.supported_locales
        ]
        result["capabilities"] = capabilities
        return result
@callback
def async_get_entities(hass, config) -> List[AlexaEntity]:
    """Return all entities that are supported by Alexa."""
    result = []
    for state in hass.states.async_all():
        # Some entities must never be exposed to the cloud.
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        # Skip domains without a registered Alexa adapter.
        if state.domain not in ENTITY_ADAPTERS:
            continue
        adapted = ENTITY_ADAPTERS[state.domain](hass, config, state)
        # An entity with no interfaces cannot be discovered.
        if any(True for _ in adapted.interfaces()):
            result.append(adapted)
    return result
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.

    The choice of last resort.
    """

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        parts = [AlexaPowerController(entity)]
        parts.append(AlexaEndpointHealth(self.hass, entity))
        parts.append(Alexa(self.hass))
        return parts
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Class to represent Switch capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Outlets get the SMARTPLUG category; everything else is SWITCH.
        is_outlet = (
            self.entity.attributes.get(ATTR_DEVICE_CLASS)
            == switch.DEVICE_CLASS_OUTLET
        )
        return [DisplayCategory.SMARTPLUG if is_outlet else DisplayCategory.SWITCH]

    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaPowerController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Class to represent Climate capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]

    def interfaces(self):
        """Yield the supported interfaces."""
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        # Expose on/off control only when the device has an "off" HVAC mode.
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Class to represent Cover capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            return [DisplayCategory.GARAGE_DOOR]
        if device_class == cover.DEVICE_CLASS_DOOR:
            return [DisplayCategory.DOOR]
        if device_class in (
            cover.DEVICE_CLASS_BLIND,
            cover.DEVICE_CLASS_SHADE,
            cover.DEVICE_CLASS_CURTAIN,
        ):
            return [DisplayCategory.INTERIOR_BLIND]
        if device_class in (
            cover.DEVICE_CLASS_WINDOW,
            cover.DEVICE_CLASS_AWNING,
            cover.DEVICE_CLASS_SHUTTER,
        ):
            return [DisplayCategory.EXTERIOR_BLIND]
        # Unrecognized device classes fall back to OTHER.
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        # Garage doors and gates are not exposed with on/off power control.
        if device_class not in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Prefer precise positioning (range); fall back to open/close (mode).
        if supported & cover.SUPPORT_SET_POSITION:
            yield AlexaRangeController(
                self.entity, instance=f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
            )
        elif supported & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            yield AlexaModeController(
                self.entity, instance=f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
            )
        if supported & cover.SUPPORT_SET_TILT_POSITION:
            yield AlexaRangeController(self.entity, instance=f"{cover.DOMAIN}.tilt")
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Class to represent Light capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Map each optional light feature to its Alexa controller.
        if features & light.SUPPORT_BRIGHTNESS:
            yield AlexaBrightnessController(self.entity)
        if features & light.SUPPORT_COLOR:
            yield AlexaColorController(self.entity)
        if features & light.SUPPORT_COLOR_TEMP:
            yield AlexaColorTemperatureController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Class to represent Fan capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & fan.SUPPORT_SET_SPEED:
            # Speed is exposed three ways: percentage, power level and range.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        if features & fan.SUPPORT_OSCILLATE:
            yield AlexaToggleController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}"
            )
        if features & fan.SUPPORT_DIRECTION:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Class to represent Lock capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]

    def interfaces(self):
        """Return the supported interfaces."""
        entity = self.entity
        return [
            AlexaLockController(entity),
            AlexaEndpointHealth(self.hass, entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Class to represent MediaPlayer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        # Every other media player device class defaults to TV.
        return [DisplayCategory.TV]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Absolute volume control takes precedence over step-only control.
        if supported & media_player.const.SUPPORT_VOLUME_SET:
            yield AlexaSpeaker(self.entity)
        elif supported & media_player.const.SUPPORT_VOLUME_STEP:
            yield AlexaStepSpeaker(self.entity)
        # Any single playback feature enables both playback control and
        # playback state reporting.
        playback_features = (
            media_player.const.SUPPORT_PLAY
            | media_player.const.SUPPORT_PAUSE
            | media_player.const.SUPPORT_STOP
            | media_player.const.SUPPORT_NEXT_TRACK
            | media_player.const.SUPPORT_PREVIOUS_TRACK
        )
        if supported & playback_features:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)
        if supported & media_player.const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)
        if supported & media_player.SUPPORT_SELECT_SOURCE:
            # Only expose input control when at least one source survives
            # validation — presumably filtering to names Alexa accepts.
            inputs = AlexaInputController.get_valid_inputs(
                self.entity.attributes.get(
                    media_player.const.ATTR_INPUT_SOURCE_LIST, []
                )
            )
            if len(inputs) > 0:
                yield AlexaInputController(self.entity)
        if supported & media_player.const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)
        if supported & media_player.const.SUPPORT_SELECT_SOUND_MODE:
            # The equalizer is exposed only when at least one sound mode
            # survives the same validation.
            inputs = AlexaInputController.get_valid_inputs(
                self.entity.attributes.get(media_player.const.ATTR_SOUND_MODE_LIST, [])
            )
            if len(inputs) > 0:
                yield AlexaEqualizerController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Class to represent Scene capabilities."""

    def description(self):
        """Return the Alexa API description."""
        base = AlexaEntity.description(self)
        # Append a marker only when "scene" is not already mentioned.
        if "scene" in base.casefold():
            return base
        return f"{base} (Scene)"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        controller = AlexaSceneController(self.entity, supports_deactivation=False)
        return [controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Class to represent Script capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Deactivation is always supported for scripts.
        controller = AlexaSceneController(self.entity, supports_deactivation=True)
        return [controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Class to represent Sensor capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Although there are other kinds of sensors, all but temperature
        # sensors are currently ignored.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        # Only sensors reporting a temperature unit get a temperature interface.
        if unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
            yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Internal sensor-type tags returned by get_type().
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"
    TYPE_PRESENCE = "presence"

    def default_display_categories(self):
        """Return the display categories for this entity, or None if unknown."""
        sensor_type = self.get_type()
        # Use == instead of `is`: identity of equal strings is a CPython
        # interning detail and must not be relied on for comparisons.
        if sensor_type == self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type == self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]
        if sensor_type == self.TYPE_PRESENCE:
            return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        sensor_type = self.get_type()
        if sensor_type == self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_PRESENCE:
            yield AlexaEventDetectionSensor(self.hass, self.entity)

        # Yield additional interfaces based on the display category the user
        # specified in the configuration.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            display_category = entity_conf[CONF_DISPLAY_CATEGORIES]
            if display_category == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
            elif display_category == DisplayCategory.CONTACT_SENSOR:
                yield AlexaContactSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.MOTION_SENSOR:
                yield AlexaMotionSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.CAMERA:
                yield AlexaEventDetectionSensor(self.hass, self.entity)

        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor, or None if unrecognized."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        ):
            return self.TYPE_CONTACT
        if device_class == binary_sensor.DEVICE_CLASS_MOTION:
            return self.TYPE_MOTION
        if device_class == binary_sensor.DEVICE_CLASS_PRESENCE:
            return self.TYPE_PRESENCE
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Class to represent Alarm capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Panels requiring an arm code cannot be driven by Alexa at all.
        needs_code_to_arm = self.entity.attributes.get("code_arm_required")
        if not needs_code_to_arm:
            yield AlexaSecurityPanelController(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(image_processing.DOMAIN)
class ImageProcessingCapabilities(AlexaEntity):
    """Class to represent image_processing capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield from (
            AlexaEventDetectionSensor(self.hass, self.entity),
            AlexaEndpointHealth(self.hass, self.entity),
            Alexa(self.hass),
        )
@ENTITY_ADAPTERS.register(input_number.DOMAIN)
class InputNumberCapabilities(AlexaEntity):
    """Class to represent input_number capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        instance_name = f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}"
        yield AlexaRangeController(self.entity, instance=instance_name)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(timer.DOMAIN)
class TimerCapabilities(AlexaEntity):
    """Class to represent Timer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaTimeHoldController(self.entity, allow_remote_resume=True)
        yield AlexaPowerController(self.entity)
        # Fix: Alexa() takes the hass instance, not the entity state object.
        # Every other adapter in this module passes self.hass here.
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(vacuum.DOMAIN)
class VacuumCapabilities(AlexaEntity):
    """Class to represent vacuum capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Power control needs both a way to start and a way to stop.
        can_start = features & (vacuum.SUPPORT_TURN_ON | vacuum.SUPPORT_START)
        can_stop = features & (vacuum.SUPPORT_TURN_OFF | vacuum.SUPPORT_RETURN_HOME)
        if can_start and can_stop:
            yield AlexaPowerController(self.entity)
        if features & vacuum.SUPPORT_FAN_SPEED:
            yield AlexaRangeController(
                self.entity, instance=f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}"
            )
        if features & vacuum.SUPPORT_PAUSE:
            yield AlexaTimeHoldController(
                self.entity,
                allow_remote_resume=bool(features & vacuum.SUPPORT_START),
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(camera.DOMAIN)
class CameraCapabilities(AlexaEntity):
    """Class to represent Camera capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces.

        The camera stream controller is only offered when the stream
        component is loaded and an external HTTPS URL is available
        (see _check_requirements).
        """
        if self._check_requirements():
            supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            if supported & camera.SUPPORT_STREAM:
                yield AlexaCameraStreamController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def _check_requirements(self):
        """Check the hass URL for HTTPS scheme.

        Return True only when the stream component is loaded and an
        external (non-internal, non-IP) HTTPS URL on a standard port can
        be generated for this instance.
        """
        if "stream" not in self.hass.config.components:
            _LOGGER.debug(
                "%s requires stream component for AlexaCameraStreamController",
                self.entity_id,
            )
            return False
        try:
            network.get_url(
                self.hass,
                allow_internal=False,
                allow_ip=False,
                require_ssl=True,
                require_standard_port=True,
            )
        except network.NoURLAvailableError:
            _LOGGER.debug(
                "%s requires HTTPS for AlexaCameraStreamController", self.entity_id
            )
            return False
        return True
| {
"repo_name": "mKeRix/home-assistant",
"path": "homeassistant/components/alexa/entities.py",
"copies": "3",
"size": "27599",
"license": "mit",
"hash": 1779032652107930000,
"line_mean": 32.739608802,
"line_max": 115,
"alpha_frac": 0.6585021196,
"autogenerated": false,
"ratio": 4.251232285890326,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6409734405490326,
"avg_score": null,
"num_lines": null
} |
"""Alexa entity adapters."""
import logging
from typing import TYPE_CHECKING, List
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
camera,
cover,
fan,
group,
image_processing,
input_boolean,
input_number,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
__version__,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers import network
from homeassistant.util.decorator import Registry
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaCameraStreamController,
AlexaCapability,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaEqualizerController,
AlexaEventDetectionSensor,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaTimeHoldController,
AlexaToggleController,
)
from .const import CONF_DESCRIPTION, CONF_DISPLAY_CATEGORIES
if TYPE_CHECKING:
from .config import AbstractConfig
_LOGGER = logging.getLogger(__name__)
# Registry mapping entity domain -> AlexaEntity adapter class; populated by
# the @ENTITY_ADAPTERS.register decorators below.
ENTITY_ADAPTERS = Registry()
# Characters stripped (mapped to None) from names/descriptions/IDs sent to
# Alexa; used via str.translate in friendly_name/description/alexa_id.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """

    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"

    # Indicates a device that emits pleasant odors and masks unpleasant odors in interior spaces.
    AIR_FRESHENER = "AIR_FRESHENER"

    # Indicates a device that improves the quality of air in interior spaces.
    AIR_PURIFIER = "AIR_PURIFIER"

    # Indicates a smart device in an automobile, such as a dash camera.
    AUTO_ACCESSORY = "AUTO_ACCESSORY"

    # Indicates a security device with video or photo functionality.
    CAMERA = "CAMERA"

    # Indicates a religious holiday decoration that often contains lights.
    CHRISTMAS_TREE = "CHRISTMAS_TREE"

    # Indicates a device that makes coffee.
    COFFEE_MAKER = "COFFEE_MAKER"

    # Indicates a non-mobile computer, such as a desktop computer.
    COMPUTER = "COMPUTER"

    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"

    # Indicates a door.
    DOOR = "DOOR"

    # Indicates a doorbell.
    DOORBELL = "DOORBELL"

    # Indicates a window covering on the outside of a structure.
    EXTERIOR_BLIND = "EXTERIOR_BLIND"

    # Indicates a fan.
    FAN = "FAN"

    # Indicates a game console, such as Microsoft Xbox or Nintendo Switch
    GAME_CONSOLE = "GAME_CONSOLE"

    # Indicates a garage door.
    # Garage doors must implement the ModeController interface to open and close the door.
    GARAGE_DOOR = "GARAGE_DOOR"

    # Indicates a wearable device that transmits audio directly into the ear.
    HEADPHONES = "HEADPHONES"

    # Indicates a smart-home hub.
    HUB = "HUB"

    # Indicates a window covering on the inside of a structure.
    INTERIOR_BLIND = "INTERIOR_BLIND"

    # Indicates a laptop or other mobile computer.
    LAPTOP = "LAPTOP"

    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"

    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"

    # Indicates a mobile phone.
    MOBILE_PHONE = "MOBILE_PHONE"

    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"

    # Indicates a network-connected music system.
    MUSIC_SYSTEM = "MUSIC_SYSTEM"

    # Indicates a network router.
    NETWORK_HARDWARE = "NETWORK_HARDWARE"

    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"

    # Indicates an oven cooking appliance.
    OVEN = "OVEN"

    # Indicates a non-mobile phone, such as landline or an IP phone.
    PHONE = "PHONE"

    # Indicates a device that prints.
    PRINTER = "PRINTER"

    # Indicates a network router.
    ROUTER = "ROUTER"

    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"

    # Indicates a projector screen.
    SCREEN = "SCREEN"

    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"

    # Indicates a security system.
    SECURITY_SYSTEM = "SECURITY_SYSTEM"

    # Indicates an electric cooking device that sits on a countertop, cooks at low temperatures,
    # and is often shaped like a cooking pot.
    SLOW_COOKER = "SLOW_COOKER"

    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"

    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"

    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"

    # Indicates a streaming device such as Apple TV, Chromecast, or Roku.
    STREAMING_DEVICE = "STREAMING_DEVICE"

    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"

    # Indicates a tablet computer.
    TABLET = "TABLET"

    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"

    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"

    # Indicates the endpoint is a television.
    TV = "TV"

    # Indicates a vacuum cleaner.
    VACUUM_CLEANER = "VACUUM_CLEANER"

    # Indicates a network-connected wearable device, such as an Apple Watch, Fitbit, or Samsung Gear.
    WEARABLE = "WEARABLE"
def generate_alexa_id(entity_id: str) -> str:
    """Return the alexa ID for an entity ID."""
    # Alexa endpoint IDs cannot contain a dot; also strip disallowed chars.
    dotless = entity_id.replace(".", "#")
    return dotless.translate(TRANSLATION_TABLE)
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.

    The API handlers should manipulate entities only through this interface.
    """

    def __init__(self, hass: HomeAssistant, config: "AbstractConfig", entity: State):
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        # Cached per-entity user overrides (name, description, ...).
        self.entity_conf = config.entity_config.get(entity.entity_id, {})

    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id

    def friendly_name(self):
        """Return the Alexa API friendly name.

        A user-configured name wins over the entity's own; characters in
        TRANSLATION_TABLE are stripped.
        """
        return self.entity_conf.get(CONF_NAME, self.entity.name).translate(
            TRANSLATION_TABLE
        )

    def description(self):
        """Return the Alexa API description.

        Falls back to the entity ID when no description is configured.
        """
        description = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{description} via Home Assistant".translate(TRANSLATION_TABLE)

    def alexa_id(self):
        """Return the Alexa API entity id."""
        return generate_alexa_id(self.entity.entity_id)

    def display_categories(self):
        """Return a list of display categories.

        A user-configured category overrides the adapter's default.
        """
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            return [entity_conf[CONF_DISPLAY_CATEGORIES]]
        return self.default_display_categories()

    def default_display_categories(self):
        """Return a list of default display categories.

        This can be overridden by the user in the Home Assistant configuration.

        See also DisplayCategory.
        """
        raise NotImplementedError

    def get_interface(self, capability) -> AlexaCapability:
        """Return the given AlexaInterface.

        Raises _UnsupportedInterface.
        """
        # NOTE(review): no body here — as written this returns None; the
        # real lookup is presumably provided elsewhere. TODO confirm.

    def interfaces(self) -> List[AlexaCapability]:
        """Return a list of supported interfaces.

        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError

    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            # Only proactively-reported properties belong in state reports.
            if not interface.properties_proactively_reported():
                continue
            yield from interface.serialize_properties()

    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        result = {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
            "additionalAttributes": {
                "manufacturer": "Home Assistant",
                "model": self.entity.domain,
                "softwareVersion": __version__,
                "customIdentifier": f"{self.config.user_identifier()}-{self.entity_id}",
            },
        }
        locale = self.config.locale
        capabilities = []
        for i in self.interfaces():
            # Skip capabilities not available in the configured locale.
            if locale not in i.supported_locales:
                continue
            try:
                capabilities.append(i.serialize_discovery())
            except Exception:  # pylint: disable=broad-except
                # One broken capability must not abort the whole discovery.
                _LOGGER.exception(
                    "Error serializing %s discovery for %s", i.name(), self.entity
                )
        result["capabilities"] = capabilities
        return result
@callback
def async_get_entities(hass, config) -> List[AlexaEntity]:
    """Return all entities that are supported by Alexa."""
    entities = []
    for state in hass.states.async_all():
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        if state.domain not in ENTITY_ADAPTERS:
            continue
        adapter = ENTITY_ADAPTERS[state.domain](hass, config, state)
        # An adapter exposing no interfaces is not discoverable; drop it.
        if list(adapter.interfaces()):
            entities.append(adapter)
    return entities
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.

    The choice of last resort.
    """

    def default_display_categories(self):
        """Return the display categories for this entity."""
        is_automation = self.entity.domain == automation.DOMAIN
        category = (
            DisplayCategory.ACTIVITY_TRIGGER if is_automation else DisplayCategory.OTHER
        )
        return [category]

    def interfaces(self):
        """Return the supported interfaces."""
        capabilities = [AlexaPowerController(self.entity)]
        capabilities.append(AlexaEndpointHealth(self.hass, self.entity))
        capabilities.append(Alexa(self.hass))
        return capabilities
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Class to represent Switch capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Outlets are smart plugs to Alexa; everything else is a switch.
        if self.entity.attributes.get(ATTR_DEVICE_CLASS) == switch.DEVICE_CLASS_OUTLET:
            return [DisplayCategory.SMARTPLUG]
        return [DisplayCategory.SWITCH]

    def interfaces(self):
        """Return the supported interfaces."""
        capabilities = [AlexaPowerController(self.entity)]
        capabilities.append(AlexaEndpointHealth(self.hass, self.entity))
        capabilities.append(Alexa(self.hass))
        return capabilities
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Class to represent Climate capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]

    def interfaces(self):
        """Yield the supported interfaces."""
        # If we support two modes, one being off, we allow turning on too.
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Class to represent Cover capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity.

        Maps the cover's device class to the closest Alexa category.
        """
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            return [DisplayCategory.GARAGE_DOOR]
        if device_class == cover.DEVICE_CLASS_DOOR:
            return [DisplayCategory.DOOR]
        if device_class in (
            cover.DEVICE_CLASS_BLIND,
            cover.DEVICE_CLASS_SHADE,
            cover.DEVICE_CLASS_CURTAIN,
        ):
            return [DisplayCategory.INTERIOR_BLIND]
        if device_class in (
            cover.DEVICE_CLASS_WINDOW,
            cover.DEVICE_CLASS_AWNING,
            cover.DEVICE_CLASS_SHUTTER,
        ):
            return [DisplayCategory.EXTERIOR_BLIND]
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        # Garage doors/gates get no power controller; they are driven via
        # the range/mode controllers below.
        if device_class not in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & cover.SUPPORT_SET_POSITION:
            yield AlexaRangeController(
                self.entity, instance=f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
            )
        # Without positional control, open/close is exposed as a mode.
        elif supported & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            yield AlexaModeController(
                self.entity, instance=f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
            )
        if supported & cover.SUPPORT_SET_TILT_POSITION:
            yield AlexaRangeController(self.entity, instance=f"{cover.DOMAIN}.tilt")
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Class to represent Light capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Feature flag -> optional controller, in the original order.
        optional_controllers = (
            (light.SUPPORT_BRIGHTNESS, AlexaBrightnessController),
            (light.SUPPORT_COLOR, AlexaColorController),
            (light.SUPPORT_COLOR_TEMP, AlexaColorTemperatureController),
        )
        for feature, controller_cls in optional_controllers:
            if features & feature:
                yield controller_cls(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Class to represent Fan capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & fan.SUPPORT_SET_SPEED:
            # Speed is exposed three ways for maximum utterance coverage.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        if features & fan.SUPPORT_OSCILLATE:
            yield AlexaToggleController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}"
            )
        if features & fan.SUPPORT_DIRECTION:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Class to represent Lock capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]

    def interfaces(self):
        """Return the supported interfaces."""
        capabilities = [AlexaLockController(self.entity)]
        capabilities.append(AlexaEndpointHealth(self.hass, self.entity))
        capabilities.append(Alexa(self.hass))
        return capabilities
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Class to represent MediaPlayer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        return [DisplayCategory.TV]

    def interfaces(self):
        """Yield the supported interfaces based on the feature bitmask."""
        yield AlexaPowerController(self.entity)
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Absolute volume wins over step-only volume control.
        if supported & media_player.const.SUPPORT_VOLUME_SET:
            yield AlexaSpeaker(self.entity)
        elif supported & media_player.const.SUPPORT_VOLUME_STEP:
            yield AlexaStepSpeaker(self.entity)
        playback_features = (
            media_player.const.SUPPORT_PLAY
            | media_player.const.SUPPORT_PAUSE
            | media_player.const.SUPPORT_STOP
            | media_player.const.SUPPORT_NEXT_TRACK
            | media_player.const.SUPPORT_PREVIOUS_TRACK
        )
        # Any playback feature enables both control and state reporting.
        if supported & playback_features:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)
        if supported & media_player.const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)
        if supported & media_player.SUPPORT_SELECT_SOURCE:
            # Only expose the input controller when at least one source
            # maps to a valid Alexa input name.
            inputs = AlexaInputController.get_valid_inputs(
                self.entity.attributes.get(
                    media_player.const.ATTR_INPUT_SOURCE_LIST, []
                )
            )
            if len(inputs) > 0:
                yield AlexaInputController(self.entity)
        if supported & media_player.const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)
        if supported & media_player.const.SUPPORT_SELECT_SOUND_MODE:
            # Same validity filter for sound modes / equalizer.
            inputs = AlexaInputController.get_valid_inputs(
                self.entity.attributes.get(media_player.const.ATTR_SOUND_MODE_LIST, [])
            )
            if len(inputs) > 0:
                yield AlexaEqualizerController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Class to represent Scene capabilities."""

    def description(self):
        """Return the Alexa API description, suffixed with '(Scene)'."""
        base = AlexaEntity.description(self)
        if "scene" in base.casefold():
            return base
        return f"{base} (Scene)"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Scenes cannot be deactivated once triggered.
        controller = AlexaSceneController(self.entity, supports_deactivation=False)
        return [controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Class to represent Script capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Unlike scenes, a running script can be deactivated.
        controller = AlexaSceneController(self.entity, supports_deactivation=True)
        return [controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Class to represent Sensor capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # although there are other kinds of sensors, all but temperature
        # sensors are currently ignored.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        if unit in (TEMP_FAHRENHEIT, TEMP_CELSIUS):
            yield AlexaTemperatureSensor(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Sensor type keys returned by get_type().
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"
    TYPE_PRESENCE = "presence"

    def default_display_categories(self):
        """Return the display categories for this entity.

        Returns None when get_type() recognizes no device class.
        """
        sensor_type = self.get_type()
        # 'is' is safe: get_type() returns these exact class constants.
        if sensor_type is self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type is self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]
        if sensor_type is self.TYPE_PRESENCE:
            return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        sensor_type = self.get_type()
        if sensor_type is self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type is self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        elif sensor_type is self.TYPE_PRESENCE:
            yield AlexaEventDetectionSensor(self.hass, self.entity)

        # yield additional interfaces based on specified display category in config.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            if entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
            elif entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.CONTACT_SENSOR:
                yield AlexaContactSensor(self.hass, self.entity)
            elif entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.MOTION_SENSOR:
                yield AlexaMotionSensor(self.hass, self.entity)
            elif entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.CAMERA:
                yield AlexaEventDetectionSensor(self.hass, self.entity)

        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor (None if unrecognized)."""
        attrs = self.entity.attributes
        if attrs.get(ATTR_DEVICE_CLASS) in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        ):
            return self.TYPE_CONTACT
        if attrs.get(ATTR_DEVICE_CLASS) == binary_sensor.DEVICE_CLASS_MOTION:
            return self.TYPE_MOTION
        if attrs.get(ATTR_DEVICE_CLASS) == binary_sensor.DEVICE_CLASS_PRESENCE:
            return self.TYPE_PRESENCE
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Class to represent Alarm capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Panels requiring an arm code cannot be driven by Alexa at all.
        needs_code_to_arm = self.entity.attributes.get("code_arm_required")
        if not needs_code_to_arm:
            yield AlexaSecurityPanelController(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(image_processing.DOMAIN)
class ImageProcessingCapabilities(AlexaEntity):
    """Class to represent image_processing capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield from (
            AlexaEventDetectionSensor(self.hass, self.entity),
            AlexaEndpointHealth(self.hass, self.entity),
            Alexa(self.hass),
        )
@ENTITY_ADAPTERS.register(input_number.DOMAIN)
class InputNumberCapabilities(AlexaEntity):
    """Class to represent input_number capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        instance_name = f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}"
        yield AlexaRangeController(self.entity, instance=instance_name)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(timer.DOMAIN)
class TimerCapabilities(AlexaEntity):
    """Class to represent Timer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaTimeHoldController(self.entity, allow_remote_resume=True)
        yield AlexaPowerController(self.entity)
        # Fix: Alexa() takes the hass instance, not the entity state object.
        # Every other adapter in this module passes self.hass here.
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(vacuum.DOMAIN)
class VacuumCapabilities(AlexaEntity):
    """Class to represent vacuum capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.VACUUM_CLEANER]

    def interfaces(self):
        """Yield the supported interfaces."""
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Power control needs both a way to start and a way to stop.
        can_start = features & (vacuum.SUPPORT_TURN_ON | vacuum.SUPPORT_START)
        can_stop = features & (vacuum.SUPPORT_TURN_OFF | vacuum.SUPPORT_RETURN_HOME)
        if can_start and can_stop:
            yield AlexaPowerController(self.entity)
        if features & vacuum.SUPPORT_FAN_SPEED:
            yield AlexaRangeController(
                self.entity, instance=f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}"
            )
        if features & vacuum.SUPPORT_PAUSE:
            yield AlexaTimeHoldController(
                self.entity,
                allow_remote_resume=bool(features & vacuum.SUPPORT_START),
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(camera.DOMAIN)
class CameraCapabilities(AlexaEntity):
    """Class to represent Camera capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces.

        The camera stream controller is only offered when the stream
        component is loaded and an external HTTPS URL is available
        (see _check_requirements).
        """
        if self._check_requirements():
            supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            if supported & camera.SUPPORT_STREAM:
                yield AlexaCameraStreamController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def _check_requirements(self):
        """Check the hass URL for HTTPS scheme.

        Return True only when the stream component is loaded and an
        external (non-internal, non-IP) HTTPS URL on a standard port can
        be generated for this instance.
        """
        if "stream" not in self.hass.config.components:
            _LOGGER.debug(
                "%s requires stream component for AlexaCameraStreamController",
                self.entity_id,
            )
            return False
        try:
            network.get_url(
                self.hass,
                allow_internal=False,
                allow_ip=False,
                require_ssl=True,
                require_standard_port=True,
            )
        except network.NoURLAvailableError:
            _LOGGER.debug(
                "%s requires HTTPS for AlexaCameraStreamController", self.entity_id
            )
            return False
        return True
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/alexa/entities.py",
"copies": "3",
"size": "29781",
"license": "mit",
"hash": -1036778919343009700,
"line_mean": 32.7653061224,
"line_max": 101,
"alpha_frac": 0.6577347974,
"autogenerated": false,
"ratio": 4.215286624203822,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027445614768307444,
"num_lines": 882
} |
"""Alexa HTTP interface."""
import logging
from homeassistant import core
from homeassistant.components.http.view import HomeAssistantView
from .auth import Auth
from .config import AbstractConfig
from .const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_ENDPOINT,
CONF_ENTITY_CONFIG,
CONF_FILTER,
)
from .smart_home import async_handle_message
from .state_report import async_enable_proactive_mode
_LOGGER = logging.getLogger(__name__)
# HTTP POST endpoint that the AWS Lambda forwards Smart Home directives to.
SMART_HOME_HTTP_ENDPOINT = "/api/alexa/smart_home"
class AlexaConfig(AbstractConfig):
    """Alexa config."""

    def __init__(self, hass, config):
        """Initialize Alexa config."""
        super().__init__(hass)
        self._config = config

        # Auth (and therefore proactive state reporting) is only available
        # when both client credentials are configured.
        if config.get(CONF_CLIENT_ID) and config.get(CONF_CLIENT_SECRET):
            self._auth = Auth(hass, config[CONF_CLIENT_ID], config[CONF_CLIENT_SECRET])
        else:
            self._auth = None

    @property
    def supports_auth(self):
        """Return if config supports auth."""
        return self._auth is not None

    @property
    def should_report_state(self):
        """Return if we should proactively report states."""
        return self._auth is not None

    @property
    def endpoint(self):
        """Endpoint for report state."""
        return self._config.get(CONF_ENDPOINT)

    @property
    def entity_config(self):
        """Return entity config."""
        return self._config.get(CONF_ENTITY_CONFIG) or {}

    def should_expose(self, entity_id):
        """If an entity should be exposed."""
        return self._config[CONF_FILTER](entity_id)

    @core.callback
    def async_invalidate_access_token(self):
        """Invalidate access token.

        NOTE(review): assumes auth is configured (_auth not None);
        presumably only called when supports_auth is True — confirm callers.
        """
        self._auth.async_invalidate_access_token()

    async def async_get_access_token(self):
        """Get an access token.

        NOTE(review): same _auth-not-None assumption as above.
        """
        return await self._auth.async_get_access_token()

    async def async_accept_grant(self, code):
        """Accept a grant."""
        return await self._auth.async_do_auth(code)
async def async_setup(hass, config):
    """Activate Smart Home functionality of Alexa component.

    This is optional, triggered by having a `smart_home:` sub-section in the
    alexa configuration.

    Even if that's disabled, the functionality in this module may still be used
    by the cloud component which will call async_handle_message directly.
    """
    alexa_config = AlexaConfig(hass, config)
    view = SmartHomeView(alexa_config)
    hass.http.register_view(view)

    if alexa_config.should_report_state:
        await async_enable_proactive_mode(hass, alexa_config)
class SmartHomeView(HomeAssistantView):
    """Expose Smart Home v3 payload interface via HTTP POST."""

    url = SMART_HOME_HTTP_ENDPOINT
    name = "api:alexa:smart_home"

    def __init__(self, smart_home_config):
        """Initialize."""
        self.smart_home_config = smart_home_config

    async def post(self, request):
        """Handle Alexa Smart Home requests.

        The Smart Home API requires the endpoint to be implemented in AWS
        Lambda, which will need to forward the requests to here and pass back
        the response.
        """
        hass = request.app["hass"]
        user = request["hass_user"]
        message = await request.json()

        _LOGGER.debug("Received Alexa Smart Home request: %s", message)

        response = await async_handle_message(
            hass, self.smart_home_config, message, context=core.Context(user_id=user.id)
        )
        _LOGGER.debug("Sending Alexa Smart Home response: %s", response)

        if response is None:
            # An empty body signals "no content" back to the Lambda bridge.
            return b""
        return self.json(response)
| {
"repo_name": "leppa/home-assistant",
"path": "homeassistant/components/alexa/smart_home_http.py",
"copies": "1",
"size": "3648",
"license": "apache-2.0",
"hash": 5396199520842676000,
"line_mean": 30.1794871795,
"line_max": 88,
"alpha_frac": 0.6559758772,
"autogenerated": false,
"ratio": 4.075977653631285,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019315890102406956,
"num_lines": 117
} |
"""Alexa message handlers."""
import logging
import math
from homeassistant import core as ha
from homeassistant.components import (
camera,
cover,
fan,
group,
input_number,
light,
media_player,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ENTITY_PICTURE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_LOCK,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_UNLOCK,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_ALARM_DISARMED,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import network
import homeassistant.util.color as color_util
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import convert as convert_temperature
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_MODES_CUSTOM,
API_THERMOSTAT_PRESETS,
PERCENTAGE_FAN_MAP,
Cause,
Inputs,
)
from .entities import async_get_entities
from .errors import (
AlexaInvalidDirectiveError,
AlexaInvalidValueError,
AlexaSecurityPanelAuthorizationRequired,
AlexaSecurityPanelUnauthorizedError,
AlexaTempRangeError,
AlexaUnsupportedThermostatModeError,
AlexaVideoActionNotPermittedForContentError,
)
from .state_report import async_enable_proactive_mode
_LOGGER = logging.getLogger(__name__)
HANDLERS = Registry()
@HANDLERS.register(("Alexa.Discovery", "Discover"))
async def async_api_discovery(hass, config, directive, context):
    """Create a API formatted discovery response.

    Async friendly.
    """
    endpoints = []
    for alexa_entity in async_get_entities(hass, config):
        # Only advertise entities that pass the configured filter.
        if not config.should_expose(alexa_entity.entity_id):
            continue
        endpoints.append(alexa_entity.serialize_discovery())

    return directive.response(
        name="Discover.Response",
        namespace="Alexa.Discovery",
        payload={"endpoints": endpoints},
    )
@HANDLERS.register(("Alexa.Authorization", "AcceptGrant"))
async def async_api_accept_grant(hass, config, directive, context):
    """Create a API formatted AcceptGrant response.

    Async friendly.
    """
    auth_code = directive.payload["grant"]["code"]
    _LOGGER.debug("AcceptGrant code: %s", auth_code)

    if config.supports_auth:
        # Exchange the grant code for tokens.
        await config.async_accept_grant(auth_code)

        # With tokens in hand, proactive state reporting can start.
        if config.should_report_state:
            await async_enable_proactive_mode(hass, config)

    return directive.response(
        name="AcceptGrant.Response", namespace="Alexa.Authorization", payload={}
    )
@HANDLERS.register(("Alexa.PowerController", "TurnOn"))
async def async_api_turn_on(hass, config, directive, context):
    """Process a turn on request."""
    entity = directive.entity
    domain = entity.domain
    # Groups are switched through the homeassistant core domain service.
    if domain == group.DOMAIN:
        domain = ha.DOMAIN

    service = SERVICE_TURN_ON
    if domain == cover.DOMAIN:
        service = cover.SERVICE_OPEN_COVER
    elif domain == vacuum.DOMAIN:
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Vacuums without TURN_ON but with START are started instead.
        if not supported & vacuum.SUPPORT_TURN_ON and supported & vacuum.SUPPORT_START:
            service = vacuum.SERVICE_START
    elif domain == timer.DOMAIN:
        service = timer.SERVICE_START
    elif domain == media_player.DOMAIN:
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        power_features = media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF
        # Players without power support fall back to starting playback.
        if not supported & power_features:
            service = media_player.SERVICE_MEDIA_PLAY

    await hass.services.async_call(
        domain,
        service,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PowerController", "TurnOff"))
async def async_api_turn_off(hass, config, directive, context):
    """Process a turn off request."""
    entity = directive.entity
    domain = entity.domain
    # Groups are switched through the homeassistant core domain service.
    # Use the normalized ``domain`` below throughout, mirroring
    # async_api_turn_on (the original mixed entity.domain and domain, which
    # was behaviorally identical but inconsistent).
    if domain == group.DOMAIN:
        domain = ha.DOMAIN

    service = SERVICE_TURN_OFF
    if domain == cover.DOMAIN:
        service = cover.SERVICE_CLOSE_COVER
    elif domain == vacuum.DOMAIN:
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Vacuums without TURN_OFF but with RETURN_HOME are sent to base.
        if (
            not supported & vacuum.SUPPORT_TURN_OFF
            and supported & vacuum.SUPPORT_RETURN_HOME
        ):
            service = vacuum.SERVICE_RETURN_TO_BASE
    elif domain == timer.DOMAIN:
        service = timer.SERVICE_CANCEL
    elif domain == media_player.DOMAIN:
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        power_features = media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF
        # Players without power support fall back to stopping playback.
        if not supported & power_features:
            service = media_player.SERVICE_MEDIA_STOP

    await hass.services.async_call(
        domain,
        service,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.BrightnessController", "SetBrightness"))
async def async_api_set_brightness(hass, config, directive, context):
    """Process a set brightness request."""
    entity = directive.entity
    brightness_pct = int(directive.payload["brightness"])

    service_data = {
        ATTR_ENTITY_ID: entity.entity_id,
        light.ATTR_BRIGHTNESS_PCT: brightness_pct,
    }
    await hass.services.async_call(
        entity.domain, SERVICE_TURN_ON, service_data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.BrightnessController", "AdjustBrightness"))
async def async_api_adjust_brightness(hass, config, directive, context):
    """Process an adjust brightness request."""
    entity = directive.entity
    brightness_delta = int(directive.payload["brightnessDelta"])

    # Read the current brightness (0-255) and convert it to a percentage.
    # When the light is off the attribute is absent (None) and int(None)
    # raises TypeError; treat that as 0%.  The original ZeroDivisionError
    # handler was unreachable because the divisor is the constant 255.
    try:
        current = math.floor(
            int(entity.attributes.get(light.ATTR_BRIGHTNESS)) / 255 * 100
        )
    except (TypeError, ValueError, ZeroDivisionError):
        current = 0

    # Apply the delta, clamping below at 0 percent.
    brightness = max(0, brightness_delta + current)
    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_BRIGHTNESS_PCT: brightness},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.ColorController", "SetColor"))
async def async_api_set_color(hass, config, directive, context):
    """Process a set color request."""
    entity = directive.entity
    color = directive.payload["color"]
    # Alexa sends HSB; convert to RGB for the light turn_on service.
    rgb = color_util.color_hsb_to_RGB(
        float(color["hue"]),
        float(color["saturation"]),
        float(color["brightness"]),
    )

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_RGB_COLOR: rgb},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "SetColorTemperature"))
async def async_api_set_color_temperature(hass, config, directive, context):
    """Process a set color temperature request."""
    entity = directive.entity
    kelvin = int(directive.payload["colorTemperatureInKelvin"])

    service_data = {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_KELVIN: kelvin}
    await hass.services.async_call(
        entity.domain, SERVICE_TURN_ON, service_data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "DecreaseColorTemperature"))
async def async_api_decrease_color_temp(hass, config, directive, context):
    """Process a decrease color temperature request."""
    entity = directive.entity
    current = int(entity.attributes.get(light.ATTR_COLOR_TEMP))
    max_mireds = int(entity.attributes.get(light.ATTR_MAX_MIREDS))

    # Lower color temperature means more mireds; step by 50, clamped to the
    # light's maximum supported mired value.
    target = min(max_mireds, current + 50)

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_COLOR_TEMP: target},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "IncreaseColorTemperature"))
async def async_api_increase_color_temp(hass, config, directive, context):
    """Process an increase color temperature request."""
    entity = directive.entity
    current = int(entity.attributes.get(light.ATTR_COLOR_TEMP))
    min_mireds = int(entity.attributes.get(light.ATTR_MIN_MIREDS))

    # Higher color temperature means fewer mireds; step by 50, clamped to
    # the light's minimum supported mired value.
    target = max(min_mireds, current - 50)

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_COLOR_TEMP: target},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.SceneController", "Activate"))
async def async_api_activate(hass, config, directive, context):
    """Process an activate request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    # Alexa expects a naive-UTC ISO timestamp with a literal "Z" suffix.
    timestamp = f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z"
    return directive.response(
        name="ActivationStarted",
        namespace="Alexa.SceneController",
        payload={
            "cause": {"type": Cause.VOICE_INTERACTION},
            "timestamp": timestamp,
        },
    )
@HANDLERS.register(("Alexa.SceneController", "Deactivate"))
async def async_api_deactivate(hass, config, directive, context):
    """Process a deactivate request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    # Alexa expects a naive-UTC ISO timestamp with a literal "Z" suffix.
    timestamp = f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z"
    return directive.response(
        name="DeactivationStarted",
        namespace="Alexa.SceneController",
        payload={
            "cause": {"type": Cause.VOICE_INTERACTION},
            "timestamp": timestamp,
        },
    )
@HANDLERS.register(("Alexa.PercentageController", "SetPercentage"))
async def async_api_set_percentage(hass, config, directive, context):
    """Process a set percentage request."""
    entity = directive.entity
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        percentage = int(directive.payload["percentage"])
        # Map the 0-100 range onto the fan's discrete speed steps;
        # anything above 100 falls through to "off".
        if percentage <= 33:
            speed = "low"
        elif percentage <= 66:
            speed = "medium"
        elif percentage <= 100:
            speed = "high"
        else:
            speed = "off"
        data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.PercentageController", "AdjustPercentage"))
async def async_api_adjust_percentage(hass, config, directive, context):
    """Process an adjust percentage request."""
    entity = directive.entity
    percentage_delta = int(directive.payload["percentageDelta"])
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        # Translate the current named speed back to a percentage,
        # defaulting to 100 when the speed is unknown.
        current = PERCENTAGE_FAN_MAP.get(entity.attributes.get(fan.ATTR_SPEED), 100)

        # Apply the delta (clamped below at 0) and map back to a speed step;
        # anything above 100 falls through to "off".
        percentage = max(0, percentage_delta + current)
        if percentage <= 33:
            speed = "low"
        elif percentage <= 66:
            speed = "medium"
        elif percentage <= 100:
            speed = "high"
        else:
            speed = "off"
        data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.LockController", "Lock"))
async def async_api_lock(hass, config, directive, context):
    """Process a lock request."""
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}

    await hass.services.async_call(
        entity.domain, SERVICE_LOCK, data, blocking=False, context=context
    )

    # Report the expected post-lock state back to Alexa.
    response = directive.response()
    response.add_context_property(
        {"name": "lockState", "namespace": "Alexa.LockController", "value": "LOCKED"}
    )
    return response
@HANDLERS.register(("Alexa.LockController", "Unlock"))
async def async_api_unlock(hass, config, directive, context):
    """Process an unlock request."""
    # Amazon only permits the Unlock directive in a subset of locales.
    if config.locale not in {"de-DE", "en-US", "ja-JP"}:
        msg = f"The unlock directive is not supported for the following locales: {config.locale}"
        raise AlexaInvalidDirectiveError(msg)

    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}

    await hass.services.async_call(
        entity.domain, SERVICE_UNLOCK, data, blocking=False, context=context
    )

    # Report the expected post-unlock state back to Alexa.
    response = directive.response()
    response.add_context_property(
        {"namespace": "Alexa.LockController", "name": "lockState", "value": "UNLOCKED"}
    )

    return response
@HANDLERS.register(("Alexa.Speaker", "SetVolume"))
async def async_api_set_volume(hass, config, directive, context):
    """Process a set volume request."""
    # Convert the 0-100 Alexa volume to HA's 0.0-1.0 level.  Coerce the
    # payload value to float *before* dividing so a numeric string payload
    # is handled instead of raising TypeError (the original divided first).
    volume = round(float(directive.payload["volume"]) / 100, 2)
    entity = directive.entity

    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,
    }

    await hass.services.async_call(
        entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.InputController", "SelectInput"))
async def async_api_select_input(hass, config, directive, context):
    """Process a set input request.

    Maps Alexa's canonical ALL-UPPERCASE input name onto one of the media
    player's configured source names, then calls select_source.
    """
    media_input = directive.payload["input"]
    entity = directive.entity

    # Attempt to map the ALL UPPERCASE payload name to a source.
    # Strips trailing 1 to match single input devices.
    source_list = entity.attributes.get(media_player.const.ATTR_INPUT_SOURCE_LIST, [])
    for source in source_list:
        # Normalize both sides (lowercase, separators stripped) so e.g.
        # "HDMI 1" and "hdmi_1" compare equal.
        formatted_source = (
            source.lower().replace("-", "").replace("_", "").replace(" ", "")
        )
        media_input = media_input.lower().replace(" ", "")
        if (
            formatted_source in Inputs.VALID_SOURCE_NAME_MAP
            and formatted_source == media_input
        ) or (
            media_input.endswith("1") and formatted_source == media_input.rstrip("1")
        ):
            # Use the entity's own (un-normalized) source name for the call.
            media_input = source
            break
    else:
        # Loop completed without a match: reject the directive.
        msg = (
            f"failed to map input {media_input} to a media source on {entity.entity_id}"
        )
        raise AlexaInvalidValueError(msg)

    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_INPUT_SOURCE: media_input,
    }
    await hass.services.async_call(
        entity.domain,
        media_player.SERVICE_SELECT_SOURCE,
        data,
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.Speaker", "AdjustVolume"))
async def async_api_adjust_volume(hass, config, directive, context):
    """Process an adjust volume request."""
    volume_delta = int(directive.payload["volume"])

    entity = directive.entity
    current_level = entity.attributes.get(media_player.const.ATTR_MEDIA_VOLUME_LEVEL)

    # Convert the current 0.0-1.0 level to a 0-100 percentage.  The
    # attribute may be absent (None), in which case None * 100 raises
    # TypeError; treat that as 0.  The original ZeroDivisionError handler
    # was unreachable since no division occurs here.
    try:
        current = math.floor(int(current_level * 100))
    except (TypeError, ValueError, ZeroDivisionError):
        current = 0

    # Apply the delta (clamped below at 0) and convert back to 0.0-1.0.
    volume = float(max(0, volume_delta + current) / 100)

    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,
    }

    await hass.services.async_call(
        entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.StepSpeaker", "AdjustVolume"))
async def async_api_adjust_volume_step(hass, config, directive, context):
    """Process an adjust volume step request."""
    # media_player volume up/down service does not support specifying steps
    # each component handles it differently e.g. via config.
    # This workaround will simply call the volume up/Volume down the amount of steps asked for
    # When no steps are called in the request, Alexa sends a default of 10 steps which for most
    # purposes is too high. The default is set 1 in this case.
    entity = directive.entity
    steps = int(directive.payload["volumeSteps"])
    use_default = bool(directive.payload["volumeStepsDefault"])

    # Direction comes from the sign of the requested step count.
    if steps < 0:
        service_volume = SERVICE_VOLUME_DOWN
    else:
        service_volume = SERVICE_VOLUME_UP
    # Alexa's default of 10 steps is too coarse; use a single step instead.
    if use_default:
        steps = -1 if steps < 0 else 1

    data = {ATTR_ENTITY_ID: entity.entity_id}
    for _ in range(abs(steps)):
        await hass.services.async_call(
            entity.domain, service_volume, data, blocking=False, context=context
        )

    return directive.response()
@HANDLERS.register(("Alexa.StepSpeaker", "SetMute"))
@HANDLERS.register(("Alexa.Speaker", "SetMute"))
async def async_api_set_mute(hass, config, directive, context):
    """Process a set mute request."""
    entity = directive.entity
    mute = bool(directive.payload["mute"])

    await hass.services.async_call(
        entity.domain,
        SERVICE_VOLUME_MUTE,
        {
            ATTR_ENTITY_ID: entity.entity_id,
            media_player.const.ATTR_MEDIA_VOLUME_MUTED: mute,
        },
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Play"))
async def async_api_play(hass, config, directive, context):
    """Process a play request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_PLAY,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Pause"))
async def async_api_pause(hass, config, directive, context):
    """Process a pause request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_PAUSE,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Stop"))
async def async_api_stop(hass, config, directive, context):
    """Process a stop request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_STOP,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Next"))
async def async_api_next(hass, config, directive, context):
    """Process a next request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_NEXT_TRACK,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Previous"))
async def async_api_previous(hass, config, directive, context):
    """Process a previous request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_PREVIOUS_TRACK,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
def temperature_from_object(hass, temp_obj, interval=False):
    """Get temperature from Temperature object in requested unit."""
    to_unit = hass.config.units.temperature_unit
    temp = float(temp_obj["value"])
    scale = temp_obj["scale"]

    if scale == "FAHRENHEIT":
        from_unit = TEMP_FAHRENHEIT
    else:
        from_unit = TEMP_CELSIUS
        if scale == "KELVIN" and not interval:
            # Absolute Kelvin values get the Celsius offset applied; a
            # Kelvin *interval* is numerically identical to a Celsius one.
            temp -= 273.15

    return convert_temperature(temp, from_unit, to_unit, interval)
@HANDLERS.register(("Alexa.ThermostatController", "SetTargetTemperature"))
async def async_api_set_target_temp(hass, config, directive, context):
    """Process a set target temperature request."""
    entity = directive.entity
    min_temp = entity.attributes.get(climate.ATTR_MIN_TEMP)
    max_temp = entity.attributes.get(climate.ATTR_MAX_TEMP)
    unit = hass.config.units.temperature_unit

    data = {ATTR_ENTITY_ID: entity.entity_id}
    payload = directive.payload
    response = directive.response()

    # Each Alexa setpoint maps onto one climate service attribute; process
    # whichever of the three the payload carries, in this fixed order.
    setpoint_map = (
        ("targetSetpoint", ATTR_TEMPERATURE),
        ("lowerSetpoint", climate.ATTR_TARGET_TEMP_LOW),
        ("upperSetpoint", climate.ATTR_TARGET_TEMP_HIGH),
    )
    for alexa_name, ha_attr in setpoint_map:
        if alexa_name not in payload:
            continue
        temp = temperature_from_object(hass, payload[alexa_name])
        # Reject temperatures outside the entity's supported range.
        if temp < min_temp or temp > max_temp:
            raise AlexaTempRangeError(hass, temp, min_temp, max_temp)
        data[ha_attr] = temp
        response.add_context_property(
            {
                "name": alexa_name,
                "namespace": "Alexa.ThermostatController",
                "value": {"value": temp, "scale": API_TEMP_UNITS[unit]},
            }
        )

    await hass.services.async_call(
        entity.domain,
        climate.SERVICE_SET_TEMPERATURE,
        data,
        blocking=False,
        context=context,
    )

    return response
@HANDLERS.register(("Alexa.ThermostatController", "AdjustTargetTemperature"))
async def async_api_adjust_target_temp(hass, config, directive, context):
    """Process an adjust target temperature request."""
    entity = directive.entity
    min_temp = entity.attributes.get(climate.ATTR_MIN_TEMP)
    max_temp = entity.attributes.get(climate.ATTR_MAX_TEMP)
    unit = hass.config.units.temperature_unit

    # The delta is an interval, so absolute-scale offsets don't apply.
    temp_delta = temperature_from_object(
        hass, directive.payload["targetSetpointDelta"], interval=True
    )
    target_temp = float(entity.attributes.get(ATTR_TEMPERATURE)) + temp_delta

    # Reject temperatures outside the entity's supported range.
    if target_temp < min_temp or target_temp > max_temp:
        raise AlexaTempRangeError(hass, target_temp, min_temp, max_temp)

    response = directive.response()
    await hass.services.async_call(
        entity.domain,
        climate.SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity.entity_id, ATTR_TEMPERATURE: target_temp},
        blocking=False,
        context=context,
    )

    response.add_context_property(
        {
            "name": "targetSetpoint",
            "namespace": "Alexa.ThermostatController",
            "value": {"value": target_temp, "scale": API_TEMP_UNITS[unit]},
        }
    )

    return response
@HANDLERS.register(("Alexa.ThermostatController", "SetThermostatMode"))
async def async_api_set_thermostat_mode(hass, config, directive, context):
    """Process a set thermostat mode request.

    The Alexa mode may map to an HA preset (set_preset_mode), a CUSTOM
    mode, or a regular HVAC mode (set_hvac_mode).
    """
    entity = directive.entity
    # "thermostatMode" is either a plain string or a dict with a "value" key.
    mode = directive.payload["thermostatMode"]
    mode = mode if isinstance(mode, str) else mode["value"]

    data = {ATTR_ENTITY_ID: entity.entity_id}

    # Reverse-lookup: find the HA preset whose Alexa name matches the mode.
    ha_preset = next((k for k, v in API_THERMOSTAT_PRESETS.items() if v == mode), None)

    if ha_preset:
        presets = entity.attributes.get(climate.ATTR_PRESET_MODES, [])

        if ha_preset not in presets:
            msg = f"The requested thermostat mode {ha_preset} is not supported"
            raise AlexaUnsupportedThermostatModeError(msg)

        service = climate.SERVICE_SET_PRESET_MODE
        data[climate.ATTR_PRESET_MODE] = ha_preset

    elif mode == "CUSTOM":
        operation_list = entity.attributes.get(climate.ATTR_HVAC_MODES)
        # CUSTOM carries the real mode in the payload's "customName" field.
        custom_mode = directive.payload["thermostatMode"]["customName"]
        custom_mode = next(
            (k for k, v in API_THERMOSTAT_MODES_CUSTOM.items() if v == custom_mode),
            None,
        )
        if custom_mode not in operation_list:
            msg = (
                f"The requested thermostat mode {mode}: {custom_mode} is not supported"
            )
            raise AlexaUnsupportedThermostatModeError(msg)

        service = climate.SERVICE_SET_HVAC_MODE
        data[climate.ATTR_HVAC_MODE] = custom_mode

    else:
        operation_list = entity.attributes.get(climate.ATTR_HVAC_MODES)
        # Several HA HVAC modes can share one Alexa mode; pick one the
        # entity actually supports.
        ha_modes = {k: v for k, v in API_THERMOSTAT_MODES.items() if v == mode}
        ha_mode = next(iter(set(ha_modes).intersection(operation_list)), None)
        if ha_mode not in operation_list:
            msg = f"The requested thermostat mode {mode} is not supported"
            raise AlexaUnsupportedThermostatModeError(msg)

        service = climate.SERVICE_SET_HVAC_MODE
        data[climate.ATTR_HVAC_MODE] = ha_mode

    response = directive.response()
    await hass.services.async_call(
        climate.DOMAIN, service, data, blocking=False, context=context
    )
    response.add_context_property(
        {
            "name": "thermostatMode",
            "namespace": "Alexa.ThermostatController",
            "value": mode,
        }
    )

    return response
@HANDLERS.register(("Alexa", "ReportState"))
async def async_api_reportstate(hass, config, directive, context):
    """Process a ReportState request."""
    # NOTE(review): presumably the entity's context properties are attached
    # when the response is serialized elsewhere — confirm.
    return directive.response(name="StateReport")
@HANDLERS.register(("Alexa.PowerLevelController", "SetPowerLevel"))
async def async_api_set_power_level(hass, config, directive, context):
    """Process a SetPowerLevel request."""
    entity = directive.entity
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        level = int(directive.payload["powerLevel"])
        # Map the 0-100 power level onto the fan's discrete speed steps.
        if level <= 33:
            speed = "low"
        elif level <= 66:
            speed = "medium"
        else:
            speed = "high"
        data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.PowerLevelController", "AdjustPowerLevel"))
async def async_api_adjust_power_level(hass, config, directive, context):
    """Process an AdjustPowerLevel request."""
    entity = directive.entity
    level_delta = int(directive.payload["powerLevelDelta"])
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        # Translate the current named speed back to a percentage,
        # defaulting to 100 when the speed is unknown.
        current = PERCENTAGE_FAN_MAP.get(entity.attributes.get(fan.ATTR_SPEED), 100)

        # Apply the delta (clamped below at 0) and map back to a speed step.
        level = max(0, level_delta + current)
        if level <= 33:
            speed = "low"
        elif level <= 66:
            speed = "medium"
        else:
            speed = "high"
        data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.SecurityPanelController", "Arm"))
async def async_api_arm(hass, config, directive, context):
    """Process a Security Panel Arm request."""
    entity = directive.entity
    arm_state = directive.payload["armState"]
    data = {ATTR_ENTITY_ID: entity.entity_id}

    # The panel must be disarmed before switching to a new arm state.
    if entity.state != STATE_ALARM_DISARMED:
        msg = "You must disarm the system before you can set the requested arm state."
        raise AlexaSecurityPanelAuthorizationRequired(msg)

    service = None
    if arm_state == "ARMED_AWAY":
        service = SERVICE_ALARM_ARM_AWAY
    elif arm_state == "ARMED_NIGHT":
        service = SERVICE_ALARM_ARM_NIGHT
    elif arm_state == "ARMED_STAY":
        service = SERVICE_ALARM_ARM_HOME

    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )

    # return 0 until alarm integration supports an exit delay
    payload = {"exitDelayInSeconds": 0}

    response = directive.response(
        name="Arm.Response", namespace="Alexa.SecurityPanelController", payload=payload
    )
    response.add_context_property(
        {
            "name": "armState",
            "namespace": "Alexa.SecurityPanelController",
            "value": arm_state,
        }
    )

    return response
@HANDLERS.register(("Alexa.SecurityPanelController", "Disarm"))
async def async_api_disarm(hass, config, directive, context):
    """Process a Security Panel Disarm request."""
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}
    response = directive.response()

    # Per Alexa Documentation: If you receive a Disarm directive, and the system is already disarmed,
    # respond with a success response, not an error response.
    if entity.state == STATE_ALARM_DISARMED:
        return response

    payload = directive.payload
    # Forward a four-digit PIN to the alarm panel as the service "code".
    if "authorization" in payload:
        value = payload["authorization"]["value"]
        if payload["authorization"]["type"] == "FOUR_DIGIT_PIN":
            data["code"] = value

    # blocking=True so the call's truthiness reflects the outcome; a falsy
    # result is treated as a rejected code.
    if not await hass.services.async_call(
        entity.domain, SERVICE_ALARM_DISARM, data, blocking=True, context=context
    ):
        msg = "Invalid Code"
        raise AlexaSecurityPanelUnauthorizedError(msg)

    response.add_context_property(
        {
            "name": "armState",
            "namespace": "Alexa.SecurityPanelController",
            "value": "DISARMED",
        }
    )

    return response
@HANDLERS.register(("Alexa.ModeController", "SetMode"))
async def async_api_set_mode(hass, config, directive, context):
    """Process a SetMode directive."""
    entity = directive.entity
    instance = directive.instance
    domain = entity.domain
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}
    # Mode values are "<instance-suffix>.<value>", e.g. "direction.reverse".
    mode = directive.payload["mode"]

    # Fan Direction
    if instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
        direction = mode.split(".")[1]
        if direction in (fan.DIRECTION_REVERSE, fan.DIRECTION_FORWARD):
            service = fan.SERVICE_SET_DIRECTION
            data[fan.ATTR_DIRECTION] = direction

    # Cover Position
    elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
        position = mode.split(".")[1]

        if position == cover.STATE_CLOSED:
            service = cover.SERVICE_CLOSE_COVER
        elif position == cover.STATE_OPEN:
            service = cover.SERVICE_OPEN_COVER
        elif position == "custom":
            # "custom" denotes an intermediate position: stop where it is.
            service = cover.SERVICE_STOP_COVER

    else:
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)

    await hass.services.async_call(
        domain, service, data, blocking=False, context=context
    )

    # Echo the requested mode back as a context property.
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ModeController",
            "instance": instance,
            "name": "mode",
            "value": mode,
        }
    )

    return response
@HANDLERS.register(("Alexa.ModeController", "AdjustMode"))
async def async_api_adjust_mode(hass, config, directive, context):
    """Process a AdjustMode request.

    Requires capabilityResources supportedModes to be ordered.
    Only supportedModes with ordered=True support the adjustMode directive.
    """
    # No supportedModes are currently configured with ordered=True, so this
    # directive is always rejected.
    raise AlexaInvalidDirectiveError("Entity does not support directive")
@HANDLERS.register(("Alexa.ToggleController", "TurnOn"))
async def async_api_toggle_on(hass, config, directive, context):
    """Process a toggle on request."""
    entity = directive.entity
    instance = directive.instance

    # Fan oscillation is the only toggle capability handled here.
    if instance != f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)

    await hass.services.async_call(
        entity.domain,
        fan.SERVICE_OSCILLATE,
        {ATTR_ENTITY_ID: entity.entity_id, fan.ATTR_OSCILLATING: True},
        blocking=False,
        context=context,
    )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ToggleController",
            "instance": instance,
            "name": "toggleState",
            "value": "ON",
        }
    )

    return response
@HANDLERS.register(("Alexa.ToggleController", "TurnOff"))
async def async_api_toggle_off(hass, config, directive, context):
    """Process a toggle off request."""
    entity = directive.entity
    instance = directive.instance

    # Fan oscillation is the only toggle capability handled here.
    if instance != f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)

    await hass.services.async_call(
        entity.domain,
        fan.SERVICE_OSCILLATE,
        {ATTR_ENTITY_ID: entity.entity_id, fan.ATTR_OSCILLATING: False},
        blocking=False,
        context=context,
    )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ToggleController",
            "instance": instance,
            "name": "toggleState",
            "value": "OFF",
        }
    )

    return response
@HANDLERS.register(("Alexa.RangeController", "SetRangeValue"))
async def async_api_set_range(hass, config, directive, context):
    """Process a next request."""
    entity = directive.entity
    instance = directive.instance
    domain = entity.domain
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}
    range_value = directive.payload["rangeValue"]
    # Fan Speed: rangeValue is an index into the entity's speed list.
    if instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
        range_value = int(range_value)
        service = fan.SERVICE_SET_SPEED
        speed_list = entity.attributes[fan.ATTR_SPEED_LIST]
        speed = None
        if 0 <= range_value < len(speed_list):
            speed = speed_list[range_value]
        if not speed:
            raise AlexaInvalidValueError("Entity does not support value")
        if speed == fan.SPEED_OFF:
            # Selecting the "off" speed means turning the fan off entirely.
            service = fan.SERVICE_TURN_OFF
        data[fan.ATTR_SPEED] = speed
    # Cover Position: 0 / 100 map to the dedicated close / open services.
    elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
        range_value = int(range_value)
        if range_value == 0:
            service = cover.SERVICE_CLOSE_COVER
        elif range_value == 100:
            service = cover.SERVICE_OPEN_COVER
        else:
            service = cover.SERVICE_SET_COVER_POSITION
            data[cover.ATTR_POSITION] = range_value
    # Cover Tilt: same 0 / 100 special-casing as position.
    elif instance == f"{cover.DOMAIN}.tilt":
        range_value = int(range_value)
        if range_value == 0:
            service = cover.SERVICE_CLOSE_COVER_TILT
        elif range_value == 100:
            service = cover.SERVICE_OPEN_COVER_TILT
        else:
            service = cover.SERVICE_SET_COVER_TILT_POSITION
            data[cover.ATTR_TILT_POSITION] = range_value
    # Input Number Value: clamp the requested value to the entity's min/max.
    elif instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
        range_value = float(range_value)
        service = input_number.SERVICE_SET_VALUE
        lower = float(entity.attributes[input_number.ATTR_MIN])
        upper = float(entity.attributes[input_number.ATTR_MAX])
        data[input_number.ATTR_VALUE] = min(upper, max(lower, range_value))
    # Vacuum Fan Speed: rangeValue indexes the vacuum's fan speed list.
    elif instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
        service = vacuum.SERVICE_SET_FAN_SPEED
        speed_list = entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
        index = int(range_value)
        speed = speed_list[index] if 0 <= index < len(speed_list) else None
        if not speed:
            raise AlexaInvalidValueError("Entity does not support value")
        data[vacuum.ATTR_FAN_SPEED] = speed
    else:
        raise AlexaInvalidDirectiveError("Entity does not support directive")
    await hass.services.async_call(
        domain, service, data, blocking=False, context=context
    )
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.RangeController",
            "instance": instance,
            "name": "rangeValue",
            "value": range_value,
        }
    )
    return response
@HANDLERS.register(("Alexa.RangeController", "AdjustRangeValue"))
async def async_api_adjust_range(hass, config, directive, context):
    """Process an AdjustRangeValue request.

    Applies a relative delta to the entity's current range value and
    reports the resulting absolute value back in the response context.
    Raises AlexaInvalidValueError when the current state cannot be read,
    AlexaInvalidDirectiveError for unsupported instances.
    """
    entity = directive.entity
    instance = directive.instance
    domain = entity.domain
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}
    range_delta = directive.payload["rangeValueDelta"]
    # True when the user gave no explicit amount (e.g. "open it a little").
    range_delta_default = bool(directive.payload["rangeValueDeltaDefault"])
    response_value = 0
    # Fan Speed
    if instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
        range_delta = int(range_delta)
        service = fan.SERVICE_SET_SPEED
        speed_list = entity.attributes[fan.ATTR_SPEED_LIST]
        current_speed = entity.attributes[fan.ATTR_SPEED]
        current_speed_index = next(
            (i for i, v in enumerate(speed_list) if v == current_speed), 0
        )
        # Clamp the adjusted index to the bounds of the speed list.
        new_speed_index = min(
            len(speed_list) - 1, max(0, current_speed_index + range_delta)
        )
        speed = next(
            (v for i, v in enumerate(speed_list) if i == new_speed_index), None
        )
        if speed == fan.SPEED_OFF:
            service = fan.SERVICE_TURN_OFF
        data[fan.ATTR_SPEED] = response_value = speed
    # Cover Position
    elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
        # A default delta nudges the cover by 20 percentage points.
        range_delta = int(range_delta * 20) if range_delta_default else int(range_delta)
        service = SERVICE_SET_COVER_POSITION
        current = entity.attributes.get(cover.ATTR_POSITION)
        # Explicit None check: 0 (fully closed) is a valid current position
        # and must not be treated as "unknown" (the old truthiness test did).
        if current is None:
            msg = f"Unable to determine {entity.entity_id} current position"
            raise AlexaInvalidValueError(msg)
        position = response_value = min(100, max(0, range_delta + current))
        if position == 100:
            service = cover.SERVICE_OPEN_COVER
        elif position == 0:
            service = cover.SERVICE_CLOSE_COVER
        else:
            data[cover.ATTR_POSITION] = position
    # Cover Tilt
    elif instance == f"{cover.DOMAIN}.tilt":
        range_delta = int(range_delta * 20) if range_delta_default else int(range_delta)
        service = SERVICE_SET_COVER_TILT_POSITION
        current = entity.attributes.get(cover.ATTR_TILT_POSITION)
        # Same None-vs-0 distinction as cover position above.
        if current is None:
            msg = f"Unable to determine {entity.entity_id} current tilt position"
            raise AlexaInvalidValueError(msg)
        tilt_position = response_value = min(100, max(0, range_delta + current))
        if tilt_position == 100:
            service = cover.SERVICE_OPEN_COVER_TILT
        elif tilt_position == 0:
            service = cover.SERVICE_CLOSE_COVER_TILT
        else:
            data[cover.ATTR_TILT_POSITION] = tilt_position
    # Input Number Value
    elif instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
        range_delta = float(range_delta)
        service = input_number.SERVICE_SET_VALUE
        min_value = float(entity.attributes[input_number.ATTR_MIN])
        max_value = float(entity.attributes[input_number.ATTR_MAX])
        current = float(entity.state)
        data[input_number.ATTR_VALUE] = response_value = min(
            max_value, max(min_value, range_delta + current)
        )
    # Vacuum Fan Speed
    elif instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
        range_delta = int(range_delta)
        service = vacuum.SERVICE_SET_FAN_SPEED
        speed_list = entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
        current_speed = entity.attributes[vacuum.ATTR_FAN_SPEED]
        current_speed_index = next(
            (i for i, v in enumerate(speed_list) if v == current_speed), 0
        )
        new_speed_index = min(
            len(speed_list) - 1, max(0, current_speed_index + range_delta)
        )
        # NOTE(review): speed is None only if speed_list is empty — assumed
        # non-empty for exposed vacuums; verify against the capability builder.
        speed = next(
            (v for i, v in enumerate(speed_list) if i == new_speed_index), None
        )
        data[vacuum.ATTR_FAN_SPEED] = response_value = speed
    else:
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)
    await hass.services.async_call(
        domain, service, data, blocking=False, context=context
    )
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.RangeController",
            "instance": instance,
            "name": "rangeValue",
            "value": response_value,
        }
    )
    return response
@HANDLERS.register(("Alexa.ChannelController", "ChangeChannel"))
async def async_api_changechannel(hass, config, directive, context):
    """Process a change channel request."""
    entity = directive.entity
    channel_payload = directive.payload["channel"]
    metadata_payload = directive.payload["channelMetadata"]
    channel = "0"
    payload_name = "number"
    # Pick the first identifier present, in Alexa's order of preference.
    for key in ("number", "callSign", "affiliateCallSign", "uri"):
        if key in channel_payload:
            channel = channel_payload[key]
            payload_name = key
            break
    else:
        if "name" in metadata_payload:
            channel = metadata_payload["name"]
            payload_name = "callSign"
    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_MEDIA_CONTENT_ID: channel,
        media_player.const.ATTR_MEDIA_CONTENT_TYPE: media_player.const.MEDIA_TYPE_CHANNEL,
    }
    await hass.services.async_call(
        entity.domain,
        media_player.const.SERVICE_PLAY_MEDIA,
        data,
        blocking=False,
        context=context,
    )
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ChannelController",
            "name": "channel",
            "value": {payload_name: channel},
        }
    )
    return response
@HANDLERS.register(("Alexa.ChannelController", "SkipChannels"))
async def async_api_skipchannel(hass, config, directive, context):
    """Process a skipchannel request."""
    count = int(directive.payload["channelCount"])
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}
    # Negative counts step backwards; the skip is emulated by repeated
    # previous/next track calls.
    service_media = (
        SERVICE_MEDIA_PREVIOUS_TRACK if count < 0 else SERVICE_MEDIA_NEXT_TRACK
    )
    for _ in range(abs(count)):
        await hass.services.async_call(
            entity.domain, service_media, data, blocking=False, context=context
        )
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ChannelController",
            "name": "channel",
            "value": {"number": ""},
        }
    )
    return response
@HANDLERS.register(("Alexa.SeekController", "AdjustSeekPosition"))
async def async_api_seek(hass, config, directive, context):
    """Process a seek request.

    Applies the requested millisecond delta to the current media position,
    clamps the result to [0, media_duration], issues the seek service call,
    and reports the new position (in ms) in a StateReport.
    """
    entity = directive.entity
    position_delta = int(directive.payload["deltaPositionMilliseconds"])
    current_position = entity.attributes.get(media_player.ATTR_MEDIA_POSITION)
    # Explicit None check: a position of 0 (start of the media) is valid and
    # must not be rejected (the old truthiness test did).
    if current_position is None:
        msg = f"{entity} did not return the current media position."
        raise AlexaVideoActionNotPermittedForContentError(msg)
    # The delta is in milliseconds; HA media positions are in seconds.
    seek_position = int(current_position) + int(position_delta / 1000)
    if seek_position < 0:
        seek_position = 0
    media_duration = entity.attributes.get(media_player.ATTR_MEDIA_DURATION)
    if media_duration and 0 < int(media_duration) < seek_position:
        seek_position = media_duration
    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.ATTR_MEDIA_SEEK_POSITION: seek_position,
    }
    await hass.services.async_call(
        media_player.DOMAIN,
        media_player.SERVICE_MEDIA_SEEK,
        data,
        blocking=False,
        context=context,
    )
    # convert seconds to milliseconds for StateReport.
    seek_position = int(seek_position * 1000)
    payload = {"properties": [{"name": "positionMilliseconds", "value": seek_position}]}
    return directive.response(
        name="StateReport", namespace="Alexa.SeekController", payload=payload
    )
@HANDLERS.register(("Alexa.EqualizerController", "SetMode"))
async def async_api_set_eq_mode(hass, config, directive, context):
    """Process a SetMode request for EqualizerController."""
    mode = directive.payload["mode"]
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}
    # The Alexa mode is matched case-insensitively against the entity's
    # advertised sound mode list.
    sound_mode_list = entity.attributes.get(media_player.const.ATTR_SOUND_MODE_LIST)
    requested = mode.lower()
    if not sound_mode_list or requested not in sound_mode_list:
        raise AlexaInvalidValueError(
            f"failed to map sound mode {mode} to a mode on {entity.entity_id}"
        )
    data[media_player.const.ATTR_SOUND_MODE] = requested
    await hass.services.async_call(
        entity.domain,
        media_player.SERVICE_SELECT_SOUND_MODE,
        data,
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.EqualizerController", "AdjustBands"))
@HANDLERS.register(("Alexa.EqualizerController", "ResetBands"))
@HANDLERS.register(("Alexa.EqualizerController", "SetBands"))
async def async_api_bands_directive(hass, config, directive, context):
    """Handle an AdjustBands, ResetBands, SetBands request.
    Only mode directives are currently supported for the EqualizerController.
    """
    # Band-level equalizer control is not implemented; always reject.
    raise AlexaInvalidDirectiveError("Entity does not support directive")
@HANDLERS.register(("Alexa.TimeHoldController", "Hold"))
async def async_api_hold(hass, config, directive, context):
    """Process a TimeHoldController Hold request."""
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}
    # Map the entity domain to its pause-style service.
    domain_services = {
        timer.DOMAIN: timer.SERVICE_PAUSE,
        vacuum.DOMAIN: vacuum.SERVICE_START_PAUSE,
    }
    service = domain_services.get(entity.domain)
    if service is None:
        raise AlexaInvalidDirectiveError("Entity does not support directive")
    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )
    return directive.response()
@HANDLERS.register(("Alexa.TimeHoldController", "Resume"))
async def async_api_resume(hass, config, directive, context):
    """Process a TimeHoldController Resume request."""
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}
    # Map the entity domain to its resume-style service.  Vacuums use the
    # same start/pause toggle service for both Hold and Resume.
    domain_services = {
        timer.DOMAIN: timer.SERVICE_START,
        vacuum.DOMAIN: vacuum.SERVICE_START_PAUSE,
    }
    service = domain_services.get(entity.domain)
    if service is None:
        raise AlexaInvalidDirectiveError("Entity does not support directive")
    await hass.services.async_call(
        entity.domain, service, data, blocking=False, context=context
    )
    return directive.response()
@HANDLERS.register(("Alexa.CameraStreamController", "InitializeCameraStreams"))
async def async_api_initialize_camera_stream(hass, config, directive, context):
    """Process a InitializeCameraStreams request."""
    entity = directive.entity
    # Start an HLS stream for the camera and look up its still-image URL.
    stream_source = await camera.async_request_stream(hass, entity.entity_id, fmt="hls")
    state = hass.states.get(entity.entity_id)
    camera_image = state.attributes[ATTR_ENTITY_PICTURE]
    try:
        # Alexa requires a public HTTPS URL on the standard port.
        external_url = network.get_url(
            hass,
            allow_internal=False,
            allow_ip=False,
            require_ssl=True,
            require_standard_port=True,
        )
    except network.NoURLAvailableError as err:
        raise AlexaInvalidValueError(
            "Failed to find suitable URL to serve to Alexa"
        ) from err
    stream_info = {
        "uri": f"{external_url}{stream_source}",
        "protocol": "HLS",
        "resolution": {"width": 1280, "height": 720},
        "authorizationType": "NONE",
        "videoCodec": "H264",
        "audioCodec": "AAC",
    }
    payload = {
        "cameraStreams": [stream_info],
        "imageUri": f"{external_url}{camera_image}",
    }
    return directive.response(
        name="Response", namespace="Alexa.CameraStreamController", payload=payload
    )
| {
"repo_name": "turbokongen/home-assistant",
"path": "homeassistant/components/alexa/handlers.py",
"copies": "3",
"size": "50582",
"license": "apache-2.0",
"hash": 4335843287752829400,
"line_mean": 31.3001277139,
"line_max": 101,
"alpha_frac": 0.6453679174,
"autogenerated": false,
"ratio": 3.912895490059565,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6058263407459565,
"avg_score": null,
"num_lines": null
} |
"""Alexa models."""
import logging
from uuid import uuid4
from .const import (
API_CONTEXT,
API_DIRECTIVE,
API_ENDPOINT,
API_EVENT,
API_HEADER,
API_PAYLOAD,
API_SCOPE,
)
from .entities import ENTITY_ADAPTERS
from .errors import AlexaInvalidEndpointError
_LOGGER = logging.getLogger(__name__)
class AlexaDirective:
    """An incoming Alexa directive."""
    def __init__(self, request):
        """Initialize a directive from a raw request payload."""
        self._directive = request[API_DIRECTIVE]
        header = self._directive[API_HEADER]
        self.namespace = header["namespace"]
        self.name = header["name"]
        self.payload = self._directive[API_PAYLOAD]
        self.has_endpoint = API_ENDPOINT in self._directive
        # Populated later by load_entity() when the directive has an endpoint.
        self.entity = None
        self.entity_id = None
        self.endpoint = None
    def load_entity(self, hass, config):
        """Set attributes related to the entity for this request.
        Sets these attributes when self.has_endpoint is True:
        - entity
        - entity_id
        - endpoint
        Behavior when self.has_endpoint is False is undefined.
        Will raise AlexaInvalidEndpointError if the endpoint in the request is
        malformed or nonexistant.
        """
        _endpoint_id = self._directive[API_ENDPOINT]["endpointId"]
        # Endpoint ids use "#" in place of the "." in entity ids.
        self.entity_id = _endpoint_id.replace("#", ".")
        self.entity = hass.states.get(self.entity_id)
        if not self.entity or not config.should_expose(self.entity_id):
            raise AlexaInvalidEndpointError(_endpoint_id)
        adapter = ENTITY_ADAPTERS[self.entity.domain]
        self.endpoint = adapter(hass, config, self.entity)
    def response(self, name="Response", namespace="Alexa", payload=None):
        """Create an API formatted response.
        Async friendly.
        """
        resp = AlexaResponse(name, namespace, payload)
        correlation = self._directive[API_HEADER].get("correlationToken")
        if correlation:
            resp.set_correlation_token(correlation)
        if self.has_endpoint:
            resp.set_endpoint(self._directive[API_ENDPOINT].copy())
        return resp
    def error(
        self,
        namespace="Alexa",
        error_type="INTERNAL_ERROR",
        error_message="",
        payload=None,
    ):
        """Create a API formatted error response.
        Async friendly.
        """
        payload = payload or {}
        payload["type"] = error_type
        payload["message"] = error_message
        _LOGGER.info(
            "Request %s/%s error %s: %s",
            self._directive[API_HEADER]["namespace"],
            self._directive[API_HEADER]["name"],
            error_type,
            error_message,
        )
        return self.response(name="ErrorResponse", namespace=namespace, payload=payload)
class AlexaResponse:
    """Class to hold a response."""
    def __init__(self, name, namespace, payload=None):
        """Initialize the response envelope."""
        header = {
            "namespace": namespace,
            "name": name,
            "messageId": str(uuid4()),
            "payloadVersion": "3",
        }
        self._response = {
            API_EVENT: {API_HEADER: header, API_PAYLOAD: payload or {}}
        }
    @property
    def name(self):
        """Return the name of this response."""
        return self._response[API_EVENT][API_HEADER]["name"]
    @property
    def namespace(self):
        """Return the namespace of this response."""
        return self._response[API_EVENT][API_HEADER]["namespace"]
    def set_correlation_token(self, token):
        """Set the correlationToken.
        This should normally mirror the value from a request, and is set by
        AlexaDirective.response() usually.
        """
        self._response[API_EVENT][API_HEADER]["correlationToken"] = token
    def set_endpoint_full(self, bearer_token, endpoint_id, cookie=None):
        """Set the endpoint dictionary.
        This is used to send proactive messages to Alexa.
        """
        endpoint = {API_SCOPE: {"type": "BearerToken", "token": bearer_token}}
        if endpoint_id is not None:
            endpoint["endpointId"] = endpoint_id
        if cookie is not None:
            endpoint["cookie"] = cookie
        self._response[API_EVENT][API_ENDPOINT] = endpoint
    def set_endpoint(self, endpoint):
        """Set the endpoint.
        This should normally mirror the value from a request, and is set by
        AlexaDirective.response() usually.
        """
        self._response[API_EVENT][API_ENDPOINT] = endpoint
    def _properties(self):
        # Lazily create the context "properties" list on first use.
        context = self._response.setdefault(API_CONTEXT, {})
        return context.setdefault("properties", [])
    def add_context_property(self, prop):
        """Add a property to the response context.
        The Alexa response includes a list of properties which provides
        feedback on how states have changed. For example if a user asks,
        "Alexa, set thermostat to 20 degrees", the API expects a response with
        the new value of the property, and Alexa will respond to the user
        "Thermostat set to 20 degrees".
        async_handle_message() will call .merge_context_properties() for every
        request automatically, however often handlers will call services to
        change state but the effects of those changes are applied
        asynchronously. Thus, handlers should call this method to confirm
        changes before returning.
        """
        self._properties().append(prop)
    def merge_context_properties(self, endpoint):
        """Add all properties from given endpoint if not already set.
        Handlers should be using .add_context_property().
        """
        existing = {(p["namespace"], p["name"]) for p in self._properties()}
        for prop in endpoint.serialize_properties():
            if (prop["namespace"], prop["name"]) not in existing:
                self.add_context_property(prop)
    def serialize(self):
        """Return response as a JSON-able data structure."""
        return self._response
| {
"repo_name": "fbradyirl/home-assistant",
"path": "homeassistant/components/alexa/messages.py",
"copies": "2",
"size": "6215",
"license": "apache-2.0",
"hash": 3833510430703904300,
"line_mean": 31.3697916667,
"line_max": 88,
"alpha_frac": 0.6077232502,
"autogenerated": false,
"ratio": 4.461593682699211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.606931693289921,
"avg_score": null,
"num_lines": null
} |
## Alexander Farrell, Defne Surujon
## This program takes an input GBK file and writes a list of normalization genes
## Normalization genes for TnSeq are those that have the product annotation
## "Transposase" or "Mobile element"
import os
import csv
from optparse import OptionParser
# Command-line interface: -i/--infile names the input GenBank file; --old
# switches locus-tag extraction to the record's old_locus_tag qualifiers.
options = OptionParser(usage='%prog -i input',
                       description="Specify input gbk file and output file")
options.add_option("-i","--infile",dest="inputfile",
                   help="Input file (.gbk)")
options.add_option("--old",dest="old_tags",
                   action="store_true", default=False,
                   help="specify if old locus tags should be retrieved.")
# Read gbk file and return a dictionary of locus tag, product entries
def readgbkprod(filename,get_old_tags):
    """Parse a GenBank flat file and return {locus_tag: product} pairs.

    filename     -- path to the .gbk file
    get_old_tags -- if True, key the dict by /old_locus_tag instead of
                    /locus_tag

    Implementation is a small state machine over raw lines: i==1 means "a
    /product value is being accumulated"; SP holds the most recent locus tag.
    Column offsets (line[21], line[20:], line[31:]) assume the fixed-width
    GenBank qualifier layout — TODO confirm against the producing tool.
    """
    if get_old_tags==True:
        loc_tag_flag="/old_locus_tag"
    else:
        loc_tag_flag= "/locus_tag"
    f=open(filename)
    lines=f.readlines()
    f.close()
    tags={}
    i=0          # 1 while a multi-line /product value is being collected
    prod=""      # accumulated product text
    SP=""        # current locus tag
    count=0      # number of product entries recorded
    for line in lines:
        if i==1 and line[21]!= '/':
            # A new feature started before another qualifier: flush the
            # accumulated product, otherwise keep appending continuation text.
            if "repeat_region" in line or " gene" in line:
                prodnew = prod.replace("\"","")
                prodfin = prodnew.replace("\n","")
                count+=1
                tags[SP] = prodfin
                i=0
            else:
                prod = prod + line[20:]
        if i==1 and '/' in line:
            # Next qualifier reached: flush the accumulated product.
            prodnew = prod.replace("\"","")
            prodfin = prodnew.replace("\n","")
            count+=1
            tags[SP] = prodfin
            i=0
        if loc_tag_flag in line:
            # Tag value is the quoted string on the qualifier line.
            SP = line.split('"')[-2]
        if "/product" in line:
            # Start collecting a (possibly multi-line) product value.
            prod = line[31:]
            i=1
    print ("There are " + str(count) + " genes total")
    return tags
def makefasta(strain,prot,tags,otherfilename):
    """Write a FASTA file for every locus in *tags*.

    Each record's header embeds the strain, locus tag and product; the body
    is the protein sequence from *prot*, or the placeholder "X" when the
    locus has no sequence.
    """
    with open(otherfilename, "w") as out:
        for locus, product in tags.items():
            out.write(f">tvo|{strain}.[gene={locus}] [protein={product}]\n")
            out.write(prot.get(locus, "X") + "\n")
def main():
    """Parse CLI options, extract normalization genes, write <strain>.txt."""
    opts, args = options.parse_args()
    # Strain name = input file name without directory or extension.
    strainname = opts.inputfile.split("/")[-1].split(".")[0]
    locus = readgbkprod(opts.inputfile, opts.old_tags)
    norm_gene_tags = ["transposase", "mobile element"]
    count = 0
    with open(strainname + '.txt', 'w') as out:
        for locustag in locus:
            description = locus[locustag].lower()
            if any(tag in description for tag in norm_gene_tags):
                out.write(locustag + "\n")
                count += 1
    print("There are ", str(count), " normalization genes")
if __name__ == '__main__':
    main()
| {
"repo_name": "jsa-aerial/aerobio",
"path": "Scripts/get_norm_genes.py",
"copies": "1",
"size": "2675",
"license": "mit",
"hash": 5873572569557796000,
"line_mean": 26.4574468085,
"line_max": 100,
"alpha_frac": 0.5757009346,
"autogenerated": false,
"ratio": 3.1883194278903457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4264020362490346,
"avg_score": null,
"num_lines": null
} |
"""alexandria URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
import library.views
# URL routing table: four library.views routes plus the Django admin.
urlpatterns = [
    # Site root -> library.views.index.
    url(r'^$', library.views.index, name='index'),
    # /publish/ -> library.views.publish.
    url(r'^publish/$', library.views.publish, name='publish'),
    # /review/ -> library.views.review.
    url(r'^review/$', library.views.review, name='review'),
    # /review/<post_id>/response/ -> library.views.review_response;
    # post_id is captured as a numeric string.
    url(r'^review/(?P<post_id>[0-9]+)/response/$', library.views.review_response, name='review_response'),
    # Built-in Django admin site.
    url(r'^admin/', admin.site.urls),
]
| {
"repo_name": "baltzar/alexandria",
"path": "alexandria/urls.py",
"copies": "1",
"size": "1069",
"license": "mit",
"hash": 2155247259961657900,
"line_mean": 38.5925925926,
"line_max": 106,
"alpha_frac": 0.6875584659,
"autogenerated": false,
"ratio": 3.470779220779221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9656606984013939,
"avg_score": 0.0003461405330564209,
"num_lines": 27
} |
# Alexa Personal Assistant Companion Program for Raspberry Pi
# Modified by Simon Beal and Matthew Timmons-Brown for "The Raspberry Pi Guy" YouTube channel
# Built upon the work of Sam Machin, (c)2016
# This is a library that includes all of the web functionality of the Alexa Amazon Echo personal assistant service
# The code here was originally in main.py, but has been abstracted for ease of use (you should not need to change it)
#! /usr/bin/env python
import os
import random
import time
import alsaaudio
import wave
import random
from creds import *
import requests
import json
import re
from memcache import Client
#Settings
device = "plughw:1" # Name of your microphone/soundcard in "arecord -L"
# Is your Amazon Echo clone not working? Perhaps the microphone is not connected properly or is not found at plughw:1
# Check and then modify this variable.
#Setup - details for Amazon server
recorded = False
# Local memcached instance used to cache the Amazon access token.
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
# Directory containing this script (with a trailing separator), used to
# locate recording.wav / response.mp3.
path = os.path.realpath(__file__).rstrip(os.path.basename(__file__))
# Check whether your Raspberry Pi is connected to the internet
def internet_on():
    # Probes Amazon's token endpoint; returns True if the request completes.
    # NOTE(review): the bare except swallows every error (DNS, SSL, even
    # KeyboardInterrupt) and there is no timeout — confirm this is intended.
    print "Checking Internet Connection"
    try:
        r = requests.get('https://api.amazon.com/auth/o2/token')
        print "All systems GO"
        return True
    except:
        print "Connection Failed"
        return False
# Sends access token to Amazon - value sent is unique to each device - we do not advise you to share it
def gettoken():
    # Returns a cached access token from memcached, or exchanges the
    # refresh_token (from creds) for a new one.  Returns False when neither
    # is available.
    token = mc.get("access_token")
    refresh = refresh_token
    if token:
        return token
    elif refresh:
        payload = {"client_id" : Client_ID, "client_secret" : Client_Secret, "refresh_token" : refresh, "grant_type" : "refresh_token", }
        url = "https://api.amazon.com/auth/o2/token"
        r = requests.post(url, data = payload)
        resp = json.loads(r.text)
        # Cache slightly under the usual 3600s expiry to avoid using a
        # token that is about to lapse.
        mc.set("access_token", resp['access_token'], 3570)
        return resp['access_token']
    else:
        return False
# Send the contents of "recording.wav" to Amazon's Alexa voice service
def alexa(sense):
    # Posts the recorded audio to the AVS speech recognizer as a multipart
    # request, extracts the MP3 audio part from the multipart reply, saves it
    # as response.mp3 and plays it back.  `sense` is the SenseHat used for
    # visual feedback.  (Python 2: response bytes are handled as str.)
    url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
    headers = {'Authorization' : 'Bearer %s' % gettoken()}
    # Request metadata required by the AVS v1 recognize endpoint.
    d = {
        "messageHeader": {
            "deviceContext": [
                {
                    "name": "playbackState",
                    "namespace": "AudioPlayer",
                    "payload": {
                        "streamId": "",
                        "offsetInMilliseconds": "0",
                        "playerActivity": "IDLE"
                    }
                }
            ]
        },
        "messageBody": {
            "profile": "alexa-close-talk",
            "locale": "en-us",
            "format": "audio/S16; rate=16000; channels=1"
        }
    }
    with open(path+'recording.wav') as inf:
        # Two multipart parts: the JSON request, then the raw audio.
        files = [
            ('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
            ('file', ('audio', inf, 'audio/S16; rate=16000; channels=1'))
        ]
        r = requests.post(url, headers=headers, files=files)
    if r.status_code == 200:
        # Find the multipart boundary advertised in the Content-Type header.
        for v in r.headers['content-type'].split(";"):
            if re.match('.*boundary.*', v):
                boundary = v.split("=")[1]
        data = r.content.split(boundary)
        # Heuristic: the audio part is the large one (>= 1 KiB); the body
        # follows the blank line after the part headers.
        for d in data:
            if (len(d) >= 1024):
                audio = d.split('\r\n\r\n')[1].rstrip('--')
        with open(path+"response.mp3", 'wb') as f:
            f.write(audio)
        sense.show_letter("!")
        os.system('mpg123 -q {}response.mp3'.format(path, path)) # Writing response and playing response back to user
| {
"repo_name": "the-raspberry-pi-guy/Artificial-Intelligence-Pi",
"path": "alexa_helper.py",
"copies": "2",
"size": "3404",
"license": "mit",
"hash": 7310811381330656000,
"line_mean": 32.702970297,
"line_max": 131,
"alpha_frac": 0.6454171563,
"autogenerated": false,
"ratio": 3.196244131455399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4841661287755399,
"avg_score": null,
"num_lines": null
} |
# Alexa Personal Assitant for Raspberry Pi
# Coded by Simon Beal and Matthew Timmons-Brown for "The Raspberry Pi Guy" YouTube channel
# Built upon the work of Sam Machin, (c)2016
# Feel free to look through the code, try to understand it & modify as you wish!
# The installer MUST be run before this code.
#!/usr/bin/python
import sys
import time
from sense_hat import SenseHat
import os
import alsaaudio
import wave
import numpy
import copy
from evdev import InputDevice, list_devices, ecodes
import alexa_helper # Import the web functions of Alexa, held in a separate program in this directory
print "Welcome to Alexa. I will help you in anyway I can.\n Press Ctrl-C to quit"
sense = SenseHat() # Initialise the SenseHAT
sense.clear() # Blank the LED matrix
# Search for the SenseHAT joystick among the evdev input devices; `dev` is
# left bound to the matching device and reused later by event_loop().
found = False
devices = [InputDevice(fn) for fn in list_devices()]
for dev in devices:
    if dev.name == 'Raspberry Pi Sense HAT Joystick':
        found = True
        break
# Exit if SenseHAT not found
if not(found):
    print('Raspberry Pi Sense HAT Joystick not found. Aborting ...')
    sys.exit()
# Initialise audio buffer (raw PCM accumulated while the button is held)
audio = ""
inp = None
# We're British and we spell "colour" correctly :) Colour code for RAINBOWZ!!
# One RGB triple per LED-matrix row, red (loud) at the top.
colours = [[255, 0, 0], [255, 0, 0], [255, 105, 0], [255, 223, 0], [170, 255, 0], [52, 255, 0], [0, 255, 66], [0, 255, 183]]
# Loudness for highest bar of RGB display
max_loud = 1024
# Given a "loudness" of speech, convert into RGB LED bars and display - equaliser style
def set_display(loudness):
    # Start from an all-off column, then light the bottom `brightness` rows.
    mini = [[0,0,0]]*8
    # NOTE: relies on Python 2 integer division — under Python 3 `/` yields a
    # float and the slice below would raise TypeError.
    brightness = max(1, min(loudness, max_loud) / (max_loud/8))
    mini[8-brightness:] = colours[8-brightness:]
    # Expand each row colour to a full 8-pixel row (64 pixels total).
    display = sum([[col]*8 for col in mini], [])
    sense.set_pixels(display)
# When button is released, audio recording finishes and sent to Amazon's Alexa service
def release_button():
    global audio, inp
    sense.set_pixels([[0,0,0]]*64)
    # Persist the captured PCM as 16 kHz mono 16-bit WAV.
    # NOTE(review): `path` is defined in the __main__ guard of this script —
    # works only because the guard runs before any button event.
    w = wave.open(path+'recording.wav', 'w')
    w.setnchannels(1)
    w.setsampwidth(2)
    w.setframerate(16000)
    w.writeframes(audio)
    w.close()
    sense.show_letter("?") # Convert to question mark on display
    alexa_helper.alexa(sense) # Call upon alexa_helper program (in this directory)
    sense.clear() # Clear display
    # Reset capture state for the next press.
    inp = None
    audio = ""
# When button is pressed, start recording
def press_button():
    global audio, inp
    try:
        # Open the ALSA capture device named in alexa_helper.device.
        inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, alexa_helper.device)
    except alsaaudio.ALSAAudioError:
        print('Audio device not found - is your microphone connected? Please rerun program')
        sys.exit()
    # 16 kHz mono signed 16-bit little-endian, 1024-frame periods — matches
    # the WAV parameters written on release.
    inp.setchannels(1)
    inp.setrate(16000)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inp.setperiodsize(1024)
    audio = ""
    # Read the first period immediately so no audio is lost at press time.
    l, data = inp.read()
    if l:
        audio += data
# Whilst button is being pressed, continue recording and set "loudness"
def continue_pressed():
    global audio, inp
    # Pull the next period of PCM from the capture device opened on press.
    l, data = inp.read()
    if l:
        audio += data
        a = numpy.fromstring(data, dtype='int16') # Converts audio data to a list of integers
        loudness = int(numpy.abs(a).mean()) # Loudness is mean of amplitude of sound wave - average "loudness"
        set_display(loudness) # Set the display to show this "loudness"
# Event handler for button
def handle_enter(pressed):
    # evdev key values: 0 = released, 1 = pressed, 2 = held.
    dispatch = {0: release_button, 1: press_button, 2: continue_pressed}
    dispatch[pressed]()
# Continually loops for events, if event detected and is the middle joystick button, call upon event handler above
def event_loop():
    try:
        for event in dev.read_loop(): # for each event
            # Only react to the enter key (middle joystick press).
            if event.type == ecodes.EV_KEY and event.code == ecodes.KEY_ENTER: # if event is a key and is the enter key (middle joystick)
                handle_enter(event.value) # handle event
    except KeyboardInterrupt: # If Ctrl+C pressed, pass back to main body - which then finishes and alerts the user the program has ended
        pass
if __name__ == "__main__": # Run when program is called (won't run if you decide to import this program)
    # Block (printing dots) until an internet connection is available.
    while alexa_helper.internet_on() == False:
        print "."
    token = alexa_helper.gettoken()
    # Directory of this script with trailing separator; also read by
    # release_button() above.
    path = os.path.realpath(__file__).rstrip(os.path.basename(__file__))
    os.system('mpg123 -q {}hello.mp3'.format(path, path)) # Say hello!
    event_loop()
    print "\nYou have exited Alexa. I hope that I was useful. To talk to me again just type: python main.py"
| {
"repo_name": "the-raspberry-pi-guy/Artificial-Intelligence-Pi",
"path": "main.py",
"copies": "1",
"size": "4515",
"license": "mit",
"hash": -4175548929271900000,
"line_mean": 36.625,
"line_max": 137,
"alpha_frac": 0.6819490587,
"autogenerated": false,
"ratio": 3.3247422680412373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45066913267412373,
"avg_score": null,
"num_lines": null
} |
"""Alexa related errors."""
from homeassistant.exceptions import HomeAssistantError
from .const import API_TEMP_UNITS
class UnsupportedInterface(HomeAssistantError):
    """Raised when an entity lacks the requested Smart Home API interface."""
class UnsupportedProperty(HomeAssistantError):
    """Raised when an entity lacks the requested Smart Home API property."""
class NoTokenAvailable(HomeAssistantError):
    """Raised when no access token is available."""
class AlexaError(Exception):
    """Base class for errors that can be serialized for the Alexa API.
    A handler can raise subclasses of this to return an error to the request.
    """
    # Subclasses set these to the Alexa error interface namespace and type.
    namespace = None
    error_type = None
    def __init__(self, error_message, payload=None):
        """Initialize an alexa error.

        error_message -- human-readable message serialized into the response.
        payload -- optional extra payload fields (e.g. validRange).
        """
        Exception.__init__(self)
        self.error_message = error_message
        # Store the caller-supplied payload.  Previously this was hard-coded
        # to None, which silently discarded payloads such as
        # AlexaTempRangeError's validRange data.
        self.payload = payload
class AlexaInvalidEndpointError(AlexaError):
    """The endpoint in the request does not exist."""
    namespace = "Alexa"
    error_type = "NO_SUCH_ENDPOINT"
    def __init__(self, endpoint_id):
        """Initialize invalid endpoint error."""
        self.endpoint_id = endpoint_id
        super().__init__(f"The endpoint {endpoint_id} does not exist")
class AlexaInvalidValueError(AlexaError):
    """Class to represent InvalidValue errors."""
    # Serialized under the base "Alexa" namespace.
    namespace = "Alexa"
    error_type = "INVALID_VALUE"
class AlexaUnsupportedThermostatModeError(AlexaError):
    """Class to represent UnsupportedThermostatMode errors."""
    # Interface-specific namespace for thermostat mode errors.
    namespace = "Alexa.ThermostatController"
    error_type = "UNSUPPORTED_THERMOSTAT_MODE"
class AlexaTempRangeError(AlexaError):
    """Class to represent TempRange errors."""

    namespace = "Alexa"
    error_type = "TEMPERATURE_VALUE_OUT_OF_RANGE"

    def __init__(self, hass, temp, min_temp, max_temp):
        """Initialize TempRange error with the allowed range as payload."""
        # Report the range in the unit system Home Assistant is configured for.
        scale = API_TEMP_UNITS[hass.config.units.temperature_unit]
        valid_range = {
            "minimumValue": {"value": min_temp, "scale": scale},
            "maximumValue": {"value": max_temp, "scale": scale},
        }
        super().__init__(
            f"The requested temperature {temp} is out of range",
            {"validRange": valid_range},
        )
# Interface-specific serializable errors: each maps to an Alexa error response
# via its namespace/error_type pair; none add payload or extra state.
class AlexaBridgeUnreachableError(AlexaError):
    """Class to represent BridgeUnreachable errors."""
    namespace = "Alexa"
    error_type = "BRIDGE_UNREACHABLE"
class AlexaSecurityPanelUnauthorizedError(AlexaError):
    """Class to represent SecurityPanelController Unauthorized errors."""
    namespace = "Alexa.SecurityPanelController"
    error_type = "UNAUTHORIZED"
class AlexaSecurityPanelAuthorizationRequired(AlexaError):
    """Class to represent SecurityPanelController AuthorizationRequired errors."""
    namespace = "Alexa.SecurityPanelController"
    error_type = "AUTHORIZATION_REQUIRED"
class AlexaAlreadyInOperationError(AlexaError):
    """Class to represent AlreadyInOperation errors."""
    namespace = "Alexa"
    error_type = "ALREADY_IN_OPERATION"
class AlexaInvalidDirectiveError(AlexaError):
    """Class to represent InvalidDirective errors."""
    namespace = "Alexa"
    error_type = "INVALID_DIRECTIVE"
class AlexaVideoActionNotPermittedForContentError(AlexaError):
    """Class to represent action not permitted for content errors."""
    namespace = "Alexa.Video"
    error_type = "ACTION_NOT_PERMITTED_FOR_CONTENT"
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/alexa/errors.py",
"copies": "26",
"size": "3482",
"license": "apache-2.0",
"hash": 2922607705129670700,
"line_mean": 28.0166666667,
"line_max": 82,
"alpha_frac": 0.6990235497,
"autogenerated": false,
"ratio": 4.096470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010040160642570282,
"num_lines": 120
} |
"""Alexa Resources and Assets."""
class AlexaGlobalCatalog:
    """The Global Alexa catalog.

    https://developer.amazon.com/docs/device-apis/resources-and-assets.html#global-alexa-catalog

    You can use the global Alexa catalog for pre-defined names of devices, settings, values, and units.
    This catalog is localized into all the languages that Alexa supports.
    You can reference the following catalog of pre-defined friendly names.
    Each item in the following list is an asset identifier followed by its supported friendly names.
    The first friendly name for each identifier is the one displayed in the Alexa mobile app.
    """

    # --- Device names ---
    # Air Purifier, Air Cleaner, Clean Air Machine
    DEVICE_NAME_AIR_PURIFIER = "Alexa.DeviceName.AirPurifier"
    # Fan, Blower
    DEVICE_NAME_FAN = "Alexa.DeviceName.Fan"
    # Router, Internet Router, Network Router, Wifi Router, Net Router
    DEVICE_NAME_ROUTER = "Alexa.DeviceName.Router"
    # Shade, Blind, Curtain, Roller, Shutter, Drape, Awning, Window shade, Interior blind
    DEVICE_NAME_SHADE = "Alexa.DeviceName.Shade"
    # Shower
    DEVICE_NAME_SHOWER = "Alexa.DeviceName.Shower"
    # Space Heater, Portable Heater
    DEVICE_NAME_SPACE_HEATER = "Alexa.DeviceName.SpaceHeater"
    # Washer, Washing Machine
    DEVICE_NAME_WASHER = "Alexa.DeviceName.Washer"

    # --- Settings ---
    # 2.4G Guest Wi-Fi, 2.4G Guest Network, Guest Network 2.4G, 2G Guest Wifi
    SETTING_2G_GUEST_WIFI = "Alexa.Setting.2GGuestWiFi"
    # 5G Guest Wi-Fi, 5G Guest Network, Guest Network 5G, 5G Guest Wifi
    SETTING_5G_GUEST_WIFI = "Alexa.Setting.5GGuestWiFi"
    # Auto, Automatic, Automatic Mode, Auto Mode
    SETTING_AUTO = "Alexa.Setting.Auto"
    # Direction
    SETTING_DIRECTION = "Alexa.Setting.Direction"
    # Dry Cycle, Dry Preset, Dry Setting, Dryer Cycle, Dryer Preset, Dryer Setting
    SETTING_DRY_CYCLE = "Alexa.Setting.DryCycle"
    # Fan Speed, Airflow speed, Wind Speed, Air speed, Air velocity
    SETTING_FAN_SPEED = "Alexa.Setting.FanSpeed"
    # Guest Wi-fi, Guest Network, Guest Net
    SETTING_GUEST_WIFI = "Alexa.Setting.GuestWiFi"
    # Heat
    SETTING_HEAT = "Alexa.Setting.Heat"
    # Mode
    SETTING_MODE = "Alexa.Setting.Mode"
    # Night, Night Mode
    SETTING_NIGHT = "Alexa.Setting.Night"
    # Opening, Height, Lift, Width
    SETTING_OPENING = "Alexa.Setting.Opening"
    # Oscillate, Swivel, Oscillation, Spin, Back and forth
    SETTING_OSCILLATE = "Alexa.Setting.Oscillate"
    # Preset, Setting
    SETTING_PRESET = "Alexa.Setting.Preset"
    # Quiet, Quiet Mode, Noiseless, Silent
    SETTING_QUIET = "Alexa.Setting.Quiet"
    # Temperature, Temp
    SETTING_TEMPERATURE = "Alexa.Setting.Temperature"
    # Wash Cycle, Wash Preset, Wash setting
    SETTING_WASH_CYCLE = "Alexa.Setting.WashCycle"
    # Water Temperature, Water Temp, Water Heat
    SETTING_WATER_TEMPERATURE = "Alexa.Setting.WaterTemperature"

    # --- Shower heads ---
    # Handheld Shower, Shower Wand, Hand Shower
    SHOWER_HAND_HELD = "Alexa.Shower.HandHeld"
    # Rain Head, Overhead shower, Rain Shower, Rain Spout, Rain Faucet
    SHOWER_RAIN_HEAD = "Alexa.Shower.RainHead"

    # --- Units ---
    # Degrees, Degree
    UNIT_ANGLE_DEGREES = "Alexa.Unit.Angle.Degrees"
    # Radians, Radian
    UNIT_ANGLE_RADIANS = "Alexa.Unit.Angle.Radians"
    # Feet, Foot
    UNIT_DISTANCE_FEET = "Alexa.Unit.Distance.Feet"
    # Inches, Inch
    UNIT_DISTANCE_INCHES = "Alexa.Unit.Distance.Inches"
    # Kilometers
    UNIT_DISTANCE_KILOMETERS = "Alexa.Unit.Distance.Kilometers"
    # Meters, Meter, m
    UNIT_DISTANCE_METERS = "Alexa.Unit.Distance.Meters"
    # Miles, Mile
    UNIT_DISTANCE_MILES = "Alexa.Unit.Distance.Miles"
    # Yards, Yard
    UNIT_DISTANCE_YARDS = "Alexa.Unit.Distance.Yards"
    # Grams, Gram, g
    UNIT_MASS_GRAMS = "Alexa.Unit.Mass.Grams"
    # Kilograms, Kilogram, kg
    UNIT_MASS_KILOGRAMS = "Alexa.Unit.Mass.Kilograms"
    # Percent
    UNIT_PERCENT = "Alexa.Unit.Percent"
    # Celsius, Degrees Celsius, Degrees, C, Centigrade, Degrees Centigrade
    UNIT_TEMPERATURE_CELSIUS = "Alexa.Unit.Temperature.Celsius"
    # Degrees, Degree
    UNIT_TEMPERATURE_DEGREES = "Alexa.Unit.Temperature.Degrees"
    # Fahrenheit, Degrees Fahrenheit, Degrees F, Degrees, F
    UNIT_TEMPERATURE_FAHRENHEIT = "Alexa.Unit.Temperature.Fahrenheit"
    # Kelvin, Degrees Kelvin, Degrees K, Degrees, K
    UNIT_TEMPERATURE_KELVIN = "Alexa.Unit.Temperature.Kelvin"
    # Cubic Feet, Cubic Foot
    UNIT_VOLUME_CUBIC_FEET = "Alexa.Unit.Volume.CubicFeet"
    # Cubic Meters, Cubic Meter, Meters Cubed
    UNIT_VOLUME_CUBIC_METERS = "Alexa.Unit.Volume.CubicMeters"
    # Gallons, Gallon
    UNIT_VOLUME_GALLONS = "Alexa.Unit.Volume.Gallons"
    # Liters, Liter, L
    UNIT_VOLUME_LITERS = "Alexa.Unit.Volume.Liters"
    # Pints, Pint
    UNIT_VOLUME_PINTS = "Alexa.Unit.Volume.Pints"
    # Quarts, Quart
    UNIT_VOLUME_QUARTS = "Alexa.Unit.Volume.Quarts"
    # Ounces, Ounce, oz
    UNIT_WEIGHT_OUNCES = "Alexa.Unit.Weight.Ounces"
    # Pounds, Pound, lbs
    UNIT_WEIGHT_POUNDS = "Alexa.Unit.Weight.Pounds"

    # --- Values ---
    # Close
    VALUE_CLOSE = "Alexa.Value.Close"
    # Delicates, Delicate
    VALUE_DELICATE = "Alexa.Value.Delicate"
    # High
    VALUE_HIGH = "Alexa.Value.High"
    # Low
    VALUE_LOW = "Alexa.Value.Low"
    # Maximum, Max
    VALUE_MAXIMUM = "Alexa.Value.Maximum"
    # Medium, Mid
    VALUE_MEDIUM = "Alexa.Value.Medium"
    # Minimum, Min
    VALUE_MINIMUM = "Alexa.Value.Minimum"
    # Open
    VALUE_OPEN = "Alexa.Value.Open"
    # Quick Wash, Fast Wash, Wash Quickly, Speed Wash
    VALUE_QUICK_WASH = "Alexa.Value.QuickWash"
class AlexaCapabilityResource:
    """Base class for Alexa capabilityResources, ModeResources, and presetResources objects.

    https://developer.amazon.com/docs/device-apis/resources-and-assets.html#capability-resources
    """

    def __init__(self, labels):
        """Initialize an Alexa resource with its friendly-name labels."""
        self._resource_labels = list(labels)

    def serialize_capability_resources(self):
        """Return capabilityResources object serialized for an API response."""
        return self.serialize_labels(self._resource_labels)

    @staticmethod
    def serialize_configuration():
        """Return ModeResources, PresetResources friendlyNames serialized for an API response."""
        # The base resource has no configuration; subclasses override this.
        return []

    @staticmethod
    def serialize_labels(resources):
        """Return resource label objects for friendlyNames serialized for an API response."""
        friendly_names = []
        for entry in resources:
            # Labels matching a global catalog identifier become asset
            # references; anything else is plain localized text.
            if entry in AlexaGlobalCatalog.__dict__.values():
                friendly_names.append({"@type": "asset", "value": {"assetId": entry}})
            else:
                friendly_names.append(
                    {"@type": "text", "value": {"text": entry, "locale": "en-US"}}
                )
        return {"friendlyNames": friendly_names}
class AlexaModeResource(AlexaCapabilityResource):
    """Implements Alexa ModeResources.

    https://developer.amazon.com/docs/device-apis/resources-and-assets.html#capability-resources
    """

    def __init__(self, labels, ordered=False):
        """Initialize an Alexa modeResource."""
        super().__init__(labels)
        self._supported_modes = []
        # When True, the modes form an ordered sequence (enables adjustment).
        self._mode_ordered = ordered

    def add_mode(self, value, labels):
        """Add mode to the supportedModes object."""
        self._supported_modes.append({"value": value, "labels": labels})

    def serialize_configuration(self):
        """Return configuration for ModeResources friendlyNames serialized for an API response."""
        supported = [
            {
                "value": mode["value"],
                "modeResources": self.serialize_labels(mode["labels"]),
            }
            for mode in self._supported_modes
        ]
        return {"ordered": self._mode_ordered, "supportedModes": supported}
class AlexaPresetResource(AlexaCapabilityResource):
    """Implements Alexa PresetResources.

    Use presetResources with RangeController to provide a set of
    friendlyNames for each RangeController preset.

    https://developer.amazon.com/docs/device-apis/resources-and-assets.html#presetresources
    """

    def __init__(self, labels, min_value, max_value, precision, unit=None):
        """Initialize an Alexa presetResource."""
        super().__init__(labels)
        self._presets = []
        self._minimum_value = int(min_value)
        self._maximum_value = int(max_value)
        self._precision = int(precision)
        # Only identifiers from the global Alexa catalog are accepted as a
        # unitOfMeasure; anything else is dropped.
        if unit in AlexaGlobalCatalog.__dict__.values():
            self._unit_of_measure = unit
        else:
            self._unit_of_measure = None

    def add_preset(self, value, labels):
        """Add preset to configuration presets array."""
        self._presets.append({"value": value, "labels": labels})

    def serialize_configuration(self):
        """Return configuration for PresetResources friendlyNames serialized for an API response."""
        configuration = {
            "supportedRange": {
                "minimumValue": self._minimum_value,
                "maximumValue": self._maximum_value,
                "precision": self._precision,
            }
        }
        if self._unit_of_measure:
            configuration["unitOfMeasure"] = self._unit_of_measure
        if self._presets:
            configuration["presets"] = [
                {
                    "rangeValue": preset["value"],
                    "presetResources": self.serialize_labels(preset["labels"]),
                }
                for preset in self._presets
            ]
        return configuration
class AlexaSemantics:
    """Class for Alexa Semantics Object.

    You can optionally enable additional utterances by using semantics. When you use semantics,
    you manually map the phrases "open", "close", "raise", and "lower" to directives.

    Semantics is supported for the following interfaces only: ModeController, RangeController, and ToggleController.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#semantics-object
    """

    # Top-level keys of the serialized semantics object.
    MAPPINGS_ACTION = "actionMappings"
    MAPPINGS_STATE = "stateMappings"

    # "@type" discriminators for the individual mappings.
    ACTIONS_TO_DIRECTIVE = "ActionsToDirective"
    STATES_TO_VALUE = "StatesToValue"
    STATES_TO_RANGE = "StatesToRange"

    # Standard Alexa action and state asset identifiers.
    ACTION_CLOSE = "Alexa.Actions.Close"
    ACTION_LOWER = "Alexa.Actions.Lower"
    ACTION_OPEN = "Alexa.Actions.Open"
    ACTION_RAISE = "Alexa.Actions.Raise"
    STATES_OPEN = "Alexa.States.Open"
    STATES_CLOSED = "Alexa.States.Closed"

    # Directive names that actions may map to.
    DIRECTIVE_RANGE_SET_VALUE = "SetRangeValue"
    DIRECTIVE_RANGE_ADJUST_VALUE = "AdjustRangeValue"
    DIRECTIVE_TOGGLE_TURN_ON = "TurnOn"
    DIRECTIVE_TOGGLE_TURN_OFF = "TurnOff"
    DIRECTIVE_MODE_SET_MODE = "SetMode"
    DIRECTIVE_MODE_ADJUST_MODE = "AdjustMode"

    def __init__(self):
        """Initialize an empty semantics mapping."""
        self._action_mappings = []
        self._state_mappings = []

    def _add_action_mapping(self, mapping):
        """Add action mapping between actions and interface directives."""
        self._action_mappings.append(mapping)

    def _add_state_mapping(self, mapping):
        """Add state mapping between states and interface directives."""
        self._state_mappings.append(mapping)

    def add_states_to_value(self, states, value):
        """Add StatesToValue stateMappings."""
        self._add_state_mapping(
            {"@type": self.STATES_TO_VALUE, "states": states, "value": value}
        )

    def add_states_to_range(self, states, min_value, max_value):
        """Add StatesToRange stateMappings."""
        mapping = {
            "@type": self.STATES_TO_RANGE,
            "states": states,
            "range": {"minimumValue": min_value, "maximumValue": max_value},
        }
        self._add_state_mapping(mapping)

    def add_action_to_directive(self, actions, directive, payload):
        """Add ActionsToDirective actionMappings."""
        mapping = {
            "@type": self.ACTIONS_TO_DIRECTIVE,
            "actions": actions,
            "directive": {"name": directive, "payload": payload},
        }
        self._add_action_mapping(mapping)

    def serialize_semantics(self):
        """Return semantics object serialized for an API response."""
        # Empty mapping lists are omitted entirely from the result.
        result = {}
        if self._action_mappings:
            result[self.MAPPINGS_ACTION] = self._action_mappings
        if self._state_mappings:
            result[self.MAPPINGS_STATE] = self._state_mappings
        return result
| {
"repo_name": "leppa/home-assistant",
"path": "homeassistant/components/alexa/resources.py",
"copies": "1",
"size": "12713",
"license": "apache-2.0",
"hash": -7156532409524071000,
"line_mean": 31.850129199,
"line_max": 116,
"alpha_frac": 0.6503579014,
"autogenerated": false,
"ratio": 3.4583786724700762,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608736573870076,
"avg_score": null,
"num_lines": null
} |
# Amazon skill identifier; incoming requests are validated against this id.
APPLICATION_ID = "amzn1.ask.skill.dd677950-cade-4805-b1f1-ce2e3a3569f0"
# Bible API
BIBLE_TRANSLATION = "GNBDC" # Can't use NIV - it's still in copyright
# Passage-lookup endpoint with the translation baked in at import time.
BIBLE_API_URL = "https://bibles.org/v2/eng-{translation}/passages.js".format(
    translation=BIBLE_TRANSLATION)
# Sermons
SERMONS_XML_URL = "http://www.christchurchmayfair.org/our-talks/podcast/"
# XML namespace prefixes used when parsing the sermon podcast feed.
SERMONS_XML_NAMESPACE = {
    "ccm": "http://www.christchurchmayfair.org/",
    "itunes": "http://www.itunes.com/dtds/podcast-1.0.dtd"
}
SERMONS_XML_SERVICE_NAMES = {"morning": "AM Service", "evening": "6PM Service"}
# Alexa audio must be served from https endpoint
HTTP_MP3_TO_HTTPS_M3U_API_URL = ("https://0elu033c2a.execute-api.eu-west-1.amazonaws.com/prod/"
                                 "m3uGenerator")
# Config for correction of AMAZON.Date defaulting to future date if year not given
FUTURE_DAYS_GO_BACK_YEAR_THRESHOLD_SERMONS = 30
FUTURE_DAYS_GO_BACK_YEAR_THRESHOLD_PASSAGES = 150
# CCM Events
EVENTS_JSON_URL = "https://ccmayfair.churchsuite.co.uk/embed/calendar/json"
| {
"repo_name": "mauriceyap/ccm-assistant",
"path": "src/config.py",
"copies": "1",
"size": "1051",
"license": "mit",
"hash": 7596480830766024000,
"line_mean": 41.04,
"line_max": 95,
"alpha_frac": 0.7164605138,
"autogenerated": false,
"ratio": 2.6144278606965172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3830888374496517,
"avg_score": null,
"num_lines": null
} |
"""Alexa Skills represent the logic used to build JSON responses to
events as specified in the Alexa Skills Kit."""
import functools
from alexa.response import response_to_dict
def intent_callback(intent_name):
    """Makes the decorated method activate on the correct intent.

    This crazy black magic works by doing a runtime check on the name
    of the intent and returning None if it doesn't match. This means
    that every registered function is called.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, intent, session):
            # Fire only when the request's intent name matches; handlers
            # for other intents yield None and are skipped by the caller.
            return func(self, intent, session) if intent['name'] == intent_name else None
        return wrapper
    return decorator
class AlexaSkill(object):
    """Base class for an Alexa Skill."""

    def __init__(self):
        self._intents = {}

    def handle(self, event, context):
        """Main entry point into the Alexa Skill.

        Most Alexa Skills should not need to override this method.

        :param event: The AWS Lambda event
        :param context: The AWS Lambda context
        :return: The response built
        """
        request = event['request']
        session = event['session']
        response = None
        print(request)
        print(session)
        # Give subclasses a chance to set up state for a fresh session.
        if session['new']:
            self.start_session(request['requestId'], session)
        request_type = request['type']
        if request_type == 'LaunchRequest':
            response = response_to_dict(self.handle_launch(request, session))
        elif request_type == 'IntentRequest':
            response = response_to_dict(
                self.handle_intent(request['intent'], session))
        elif request_type == 'SessionEndedRequest':
            self.end_session(request, session)
        print(response)
        return response

    def start_session(self, request, session):
        """Handles a new Amazon Echo session.

        This method should be used to initialize any state needed for
        a session. The return value is not used.

        :param request: The request object from the Amazon Echo event
        :param session: The session object from the Amazon Echo event
        """
        pass

    def handle_launch(self, request, session):
        """Handles a LaunchRequest from the Amazon Echo.

        :param request: The request object from the Amazon Echo event
        :param session: The session object from the Amazon Echo event
        :return: Session attributes and speechlet response
        """
        raise NotImplementedError("Must override handle_launch")

    def handle_intent(self, intent, session):
        """Handles an intent from the Amazon Echo.

        Most Alexa Skills should not need to override this method.
        Instead, subclasses should define methods and use the
        `intent_callback` decorator to associate their methods with
        a particular intent.

        Note: this is horrible black magic and can probably be dealt
        with in a cleaner fashion, but it makes the API really pretty,
        so I've decided to keep it.

        :param intent: The intent object from the Amazon Echo request
        :param session: The session object from the Amazon Echo event
        :return: Session attributes and speechlet response
        """
        # Try every public method the subclass adds on top of this base
        # class; the first one that returns a non-None value wins.
        baseline = set(dir(AlexaSkill()))
        for name in set(dir(self)) - baseline:
            if name.startswith('_'):
                continue
            candidate = getattr(self, name)
            if callable(candidate):
                result = candidate(intent, session)
                if result is not None:
                    return result
        return None

    def end_session(self, request, session):
        """Handles the end of an Amazon Echo session.

        The return value is not used.

        :param request: The request object from the Amazon Echo event
        :param session: The session object from the Amazon Echo event
        """
        pass
| {
"repo_name": "ianonavy/python-alexa-skills-kit",
"path": "alexa/skill.py",
"copies": "1",
"size": "4286",
"license": "mit",
"hash": -4089579771239716000,
"line_mean": 33.564516129,
"line_max": 75,
"alpha_frac": 0.629724685,
"autogenerated": false,
"ratio": 4.821147356580427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 124
} |
"""Alexa Skill to harness the power of the Giant Bomb API."""
import sys
import logging
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
from gb import api
app = Flask(__name__)
# Route all Alexa requests through the site root.
ask = Ask(app, '/')
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# Shared Giant Bomb API client used by the intent handlers below.
giant_bomb = api.GBApi()
@ask.launch
def launch():
    """Greet the user and prompt for a query when the skill starts."""
    greeting = render_template('greeting')
    reprompt = render_template('reprompt')
    return question(greeting).reprompt(reprompt)
@ask.intent('GetAnswerIntent', mapping={'title': 'Title'}, default={'title': ''})
def answer(title):
    """The default intent to be triggered. Uses the title to search the GB API.

    :param title: the title to search in the wiki database
    :returns: a `flask-ask.statement` (or `question` when no title was
        heard) with the given template text
    """
    # No title slot was filled: ask the user again instead of searching.
    if not title:
        nothing_text = render_template('nothing')
        return question(nothing_text)
    lookup = giant_bomb.whatis(title)
    print("Lookup: {}".format(lookup))
    if lookup.match:
        found_text = render_template('found', name=lookup.name, release=lookup.release_human,
                                     deck=lookup.deck)
        return statement(found_text)
    # Cleanup: removed a dead render_template('more') call whose result was
    # stored in an unused local and never reached the response.
    notfound_text = render_template('notfound', name=title)
    return statement(notfound_text)
@ask.intent('AMAZON.HelpIntent')
def help():
    """Give the user the help text."""
    # NOTE: shadows the `help` builtin at module level; the function is only
    # referenced through the flask-ask decorator registration.
    text = render_template('reprompt')
    return question(text).reprompt(text)
@ask.intent('AMAZON.StopIntent')
def stop():
    """Allow the user to stop interacting."""
    # Plain farewell statement; no reprompt is set.
    return statement("Goodbye")
@ask.intent('AMAZON.CancelIntent')
def cancel():
    """Allow the user to cancel the interaction."""
    # Mirrors the StopIntent handler: same farewell statement.
    return statement("Goodbye")
@ask.session_ended
def session_ended():
    """End the session gracefully."""
    # Empty body with HTTP 200 acknowledges the SessionEndedRequest.
    return "", 200
def main():
    """Utility method to run the app if outside of lambda."""
    # Starts Flask's built-in development server.
    app.run()
if __name__ == '__main__':
main() | {
"repo_name": "jaykwon/giantanswers",
"path": "skill.py",
"copies": "1",
"size": "2063",
"license": "mit",
"hash": 1407605121719263000,
"line_mean": 25.1265822785,
"line_max": 94,
"alpha_frac": 0.6621425109,
"autogenerated": false,
"ratio": 3.670818505338078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4832961016238078,
"avg_score": null,
"num_lines": null
} |
"""Alexa Skill to look up the flavor forecast for The Diary Godmother."""
import sys
import logging
import datetime
from flask import Flask, render_template
from flask_ask import Ask, statement, question, convert_errors, session
import api
app = Flask(__name__)
# Route all Alexa requests through the site root.
ask = Ask(app, '/')
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# Shared Dairy Godmother API client used by all intent handlers below.
forecast = api.DGMApi()
@ask.launch
def launch():
    """Greet the user and prompt for a query when the skill starts."""
    greeting = render_template('greeting')
    reprompt = render_template('reprompt')
    return question(greeting).reprompt(reprompt)
@ask.intent('GetSearchDateIntent', convert={'date': 'date'})
def search_date(date):
    """The default intent to be triggered. Uses the date to search the DGM API.

    :param date: the date to search for the flavor of the day
    :returns: a `flask-ask.statement` result with the given template text
    """
    # Handle any date conversion errors.
    if 'date' in convert_errors:
        # Since the slot failed to convert, it keeps its string
        # value (e.g. "?") for later interrogation.
        repeatdate_text = render_template('repeatdate')
        # Bug fix: this previously referenced the misspelled name
        # `repeatedate_text`, raising NameError on every unparseable date.
        return question(repeatdate_text)
    if not date:
        nodate_text = render_template('nodate')
        return question(nodate_text)
    result = forecast.search(date)
    print("Searching for the flavor of the day for: {}".format(date))
    return _search(result)
@ask.intent('GetSearchIntent')
def search():
    """Search the DGM API for today's flavor of the day.

    :returns: a `flask-ask.statement` result with the given template text
    """
    today = datetime.datetime.now()
    result = forecast.search(today)
    print("Searching for the flavor of the day for: {}".format(today))
    return _search(result)
def _search(result):
    """Helper method for both the `GetSearchDateIntent` and the `GetSearchIntent`.

    Will take the search results and format them into an appropriate `flask-ask.statement`.

    :param result: search result object returned by the DGM API
    :returns: a `flask-ask.statement` (or `question` on error) with the
        given template text
    """
    if result.has_error:
        error_text = render_template(
            'searcherror', date=result.humanized_date)
        return question(error_text)
    if result.found:
        # If more than one result, join the flavors with `and`.
        # NOTE(review): the joiner 'and ' has no leading space, so two
        # flavors render as "Xand Y" — confirm against the templates.
        if result.size == 1:
            flavor_text = result.flavors[0]
        else:
            flavor_text = ('and ').join(result.flavors)
        found_text = render_template(
            'found', date=result.date, flavors=flavor_text)
        return statement(found_text)
    # No flavor found: distinguish a closed store from a missing entry.
    if result.closed:
        # Bug fix: this was a Python 2 print statement
        # (`print 'Result: ', result`), a SyntaxError under Python 3.
        print('Result: ', result)
        closed_text = render_template('notfoundclosed', date=result.date)
        return statement(closed_text)
    notfound_text = render_template('notfound', date=result.date)
    return statement(notfound_text)
@ask.intent('GetOpenIntent')
def open():
    """Determines if The Diary Godmother is open or not based on the current time.

    Method also returns the time left until open or the time left until close
    based on the status.

    :returns: a `flask-ask.statement` result with the given template text
    """
    # NOTE: shadows the `open` builtin at module level; only referenced via
    # the flask-ask decorator registration.
    status = forecast.get_status()
    template = 'opennow' if status.is_open else 'closednow'
    return statement(render_template(template, time=status.time_left))
@ask.intent('GetClosedIntent')
def closed():
    """Determines if The Diary Godmother is closed or not based on the current time.

    Method also returns the time left until open or the time left until close
    based on the status.

    :returns: a `flask-ask.statement` result with the given template text
    """
    # Same response logic as the GetOpenIntent handler.
    status = forecast.get_status()
    template = 'opennow' if status.is_open else 'closednow'
    return statement(render_template(template, time=status.time_left))
@ask.intent('GetOpenDateIntent', convert={'date': 'date'})
def open_date(date):
    """Checks if The Dairy Godmother is open or closed on a particular day.

    :param date: the `datetime` date
    :returns: a `flask-ask.statement` result with the given template text
    """
    # A failed slot conversion leaves the raw string (e.g. "?") behind.
    if 'date' in convert_errors:
        return question("Can you please repeat the date?")
    if not date:
        return question(render_template('nodate'))
    # Query first to make sure the store was not (randomly) closed that day.
    result = forecast.search(date)
    if result.closed:
        return statement(render_template('closeddate', date=date))
    # Otherwise report that day's operating hours.
    day_hours = forecast.operating_hours(date)
    opendate_text = render_template(
        'opendate', date=date, start=day_hours.open_str, end=day_hours.close_str)
    return statement(opendate_text)
@ask.intent('GetHoursIntent')
def hours():
    """Gives the operating hours of The Dairy Godmother.

    :returns: a `flask-ask.statement` result with the given template text
    """
    return statement(render_template('hours'))
@ask.intent('GetLocationIntent')
def location():
    """Gives the address of The Dairy Godmother.

    :returns: a `flask-ask.statement` result with the given template text
    """
    return statement(render_template('location'))
@ask.intent('GetAboutIntent')
def about():
    """Gives information about the creator of this alexa skill.

    :returns: a `flask-ask.statement` result with the given template text
    """
    # Bug fix: this handler was also named `hours`, silently clobbering the
    # GetHoursIntent handler's module-level name; renamed to `about`.
    return statement(render_template('about'))
@ask.intent('AMAZON.HelpIntent')
def help():
    """Give the user the help text."""
    # NOTE: shadows the `help` builtin; registered via the decorator only.
    text = render_template('reprompt')
    return question(text).reprompt(text)
@ask.intent('AMAZON.StopIntent')
def stop():
    """Allow the user to stop interacting."""
    # Plain farewell statement; no reprompt is set.
    return statement("Goodbye")
@ask.intent('AMAZON.CancelIntent')
def cancel():
    """Allow the user to cancel the interaction."""
    # Mirrors the StopIntent handler: same farewell statement.
    return statement("Goodbye")
@ask.session_ended
def session_ended():
    """End the session gracefully."""
    # Empty body with HTTP 200 acknowledges the SessionEndedRequest.
    return "", 200
def main():
    """Utility method to run the app if outside of lambda."""
    # Starts Flask's built-in development server.
    app.run()
if __name__ == '__main__':
    main()
| {
"repo_name": "pasharkey/flavorforecast",
"path": "src/skill.py",
"copies": "1",
"size": "6859",
"license": "mit",
"hash": 2838073168650510300,
"line_mean": 30.036199095,
"line_max": 91,
"alpha_frac": 0.6662778831,
"autogenerated": false,
"ratio": 3.8105555555555557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49768334386555557,
"avg_score": null,
"num_lines": null
} |
"""Alexa state report code."""
import asyncio
import json
import logging
import aiohttp
import async_timeout
from homeassistant.const import MATCH_ALL
from .const import API_CHANGE, Cause
from .entities import ENTITY_ADAPTERS
from .messages import AlexaResponse
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
async def async_enable_proactive_mode(hass, smart_home_config):
    """Enable the proactive mode.

    Proactive mode makes this component report state changes to Alexa.

    Returns the result of async_track_state_change (its unsubscribe
    callback — NOTE(review): confirm callers use it to disable reporting).
    """
    # Validate we can get access token.
    await smart_home_config.async_get_access_token()
    async def async_entity_state_listener(changed_entity, old_state, new_state):
        # Entity removed: there is no new state to report.
        if not new_state:
            return
        # Only domains with an Alexa adapter can be serialized.
        if new_state.domain not in ENTITY_ADAPTERS:
            return
        if not smart_home_config.should_expose(changed_entity):
            _LOGGER.debug("Not exposing %s because filtered by config", changed_entity)
            return
        alexa_changed_entity = ENTITY_ADAPTERS[new_state.domain](
            hass, smart_home_config, new_state
        )
        # A single ChangeReport carries all properties, so one send (for the
        # first interface that reports proactively) is sufficient.
        for interface in alexa_changed_entity.interfaces():
            if interface.properties_proactively_reported():
                await async_send_changereport_message(
                    hass, smart_home_config, alexa_changed_entity
                )
                return
    # Listen to every state change; filtering happens inside the listener.
    return hass.helpers.event.async_track_state_change(
        MATCH_ALL, async_entity_state_listener
    )
async def async_send_changereport_message(
    hass, config, alexa_entity, *, invalidate_access_token=True
):
    """Send a ChangeReport message for an Alexa entity.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-with-changereport-events

    :param invalidate_access_token: when True, an INVALID_ACCESS_TOKEN_EXCEPTION
        response invalidates the cached token and the send is retried once.
    """
    token = await config.async_get_access_token()
    headers = {"Authorization": f"Bearer {token}"}
    endpoint = alexa_entity.alexa_id()
    # this sends all the properties of the Alexa Entity, whether they have
    # changed or not. this should be improved, and properties that have not
    # changed should be moved to the 'context' object
    properties = list(alexa_entity.serialize_properties())
    payload = {
        API_CHANGE: {"cause": {"type": Cause.APP_INTERACTION}, "properties": properties}
    }
    message = AlexaResponse(name="ChangeReport", namespace="Alexa", payload=payload)
    message.set_endpoint_full(token, endpoint)
    message_serialized = message.serialize()
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    try:
        with async_timeout.timeout(DEFAULT_TIMEOUT):
            response = await session.post(
                config.endpoint,
                headers=headers,
                json=message_serialized,
                allow_redirects=True,
            )
    except (asyncio.TimeoutError, aiohttp.ClientError):
        _LOGGER.error("Timeout sending report to Alexa.")
        return
    response_text = await response.text()
    _LOGGER.debug("Sent: %s", json.dumps(message_serialized))
    _LOGGER.debug("Received (%s): %s", response.status, response_text)
    # 202 Accepted means the report was taken; nothing more to do.
    if response.status == 202:
        return
    response_json = json.loads(response_text)
    if (
        response_json["payload"]["code"] == "INVALID_ACCESS_TOKEN_EXCEPTION"
        # Bug fix: the condition was inverted (`not invalidate_access_token`),
        # which disabled the intended one-shot retry on the first call and
        # made the retry path recurse unboundedly if it was ever reached.
        and invalidate_access_token
    ):
        config.async_invalidate_access_token()
        # Retry exactly once with a fresh token; pass False to stop looping.
        return await async_send_changereport_message(
            hass, config, alexa_entity, invalidate_access_token=False
        )
    _LOGGER.error(
        "Error when sending ChangeReport to Alexa: %s: %s",
        response_json["payload"]["code"],
        response_json["payload"]["description"],
    )
async def async_send_add_or_update_message(hass, config, entity_ids):
    """Send an AddOrUpdateReport message for entities.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#add-or-update-report
    """
    token = await config.async_get_access_token()
    endpoints = []
    for entity_id in entity_ids:
        domain = entity_id.split(".", 1)[0]
        # Skip entities that have no Alexa adapter.
        if domain in ENTITY_ADAPTERS:
            adapter = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
            endpoints.append(adapter.serialize_discovery())
    message = AlexaResponse(
        name="AddOrUpdateReport",
        namespace="Alexa.Discovery",
        payload={
            "endpoints": endpoints,
            "scope": {"type": "BearerToken", "token": token},
        },
    )
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    return await session.post(
        config.endpoint,
        headers={"Authorization": f"Bearer {token}"},
        json=message.serialize(),
        allow_redirects=True,
    )
async def async_send_delete_message(hass, config, entity_ids):
    """Send an DeleteReport message for entities.
    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#deletereport-event
    """
    token = await config.async_get_access_token()
    headers = {"Authorization": f"Bearer {token}"}
    endpoints = []
    for entity_id in entity_ids:
        # Domain is the part of the entity_id before the first dot.
        domain = entity_id.split(".", 1)[0]
        # Silently skip domains Alexa has no adapter for.
        if domain not in ENTITY_ADAPTERS:
            continue
        alexa_entity = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
        # DeleteReport only needs the endpoint id, not full discovery data.
        endpoints.append({"endpointId": alexa_entity.alexa_id()})
    payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
    message = AlexaResponse(
        name="DeleteReport", namespace="Alexa.Discovery", payload=payload
    )
    message_serialized = message.serialize()
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    # NOTE(review): no timeout/error handling here; raw response is returned.
    return await session.post(
        config.endpoint, headers=headers, json=message_serialized, allow_redirects=True
    )
| {
"repo_name": "Cinntax/home-assistant",
"path": "homeassistant/components/alexa/state_report.py",
"copies": "1",
"size": "5879",
"license": "apache-2.0",
"hash": 4879994860031726000,
"line_mean": 30.6075268817,
"line_max": 129,
"alpha_frac": 0.6625276408,
"autogenerated": false,
"ratio": 3.961590296495957,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006149519853037989,
"num_lines": 186
} |
"""Alexa state report code."""
import asyncio
import json
import logging
import aiohttp
import async_timeout
from homeassistant.const import MATCH_ALL, STATE_ON
import homeassistant.util.dt as dt_util
from .const import API_CHANGE, Cause
from .entities import ENTITY_ADAPTERS
from .messages import AlexaResponse
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
async def async_enable_proactive_mode(hass, smart_home_config):
    """Enable the proactive mode.
    Proactive mode makes this component report state changes to Alexa.

    Returns the unsubscribe callback of the state-change tracker.
    """
    # Validate we can get access token.
    await smart_home_config.async_get_access_token()
    async def async_entity_state_listener(changed_entity, old_state, new_state):
        # Guard clauses: ignore changes during startup, entity removals,
        # unsupported domains, and entities excluded by the user's filter.
        if not hass.is_running:
            return
        if not new_state:
            return
        if new_state.domain not in ENTITY_ADAPTERS:
            return
        if not smart_home_config.should_expose(changed_entity):
            _LOGGER.debug("Not exposing %s because filtered by config", changed_entity)
            return
        alexa_changed_entity = ENTITY_ADAPTERS[new_state.domain](
            hass, smart_home_config, new_state
        )
        # Send at most one event per state change: a ChangeReport if any
        # interface proactively reports properties; otherwise a DoorbellPress
        # when a doorbell-source entity transitions to "on".
        for interface in alexa_changed_entity.interfaces():
            if interface.properties_proactively_reported():
                await async_send_changereport_message(
                    hass, smart_home_config, alexa_changed_entity
                )
                return
            if (
                interface.name() == "Alexa.DoorbellEventSource"
                and new_state.state == STATE_ON
            ):
                await async_send_doorbell_event_message(
                    hass, smart_home_config, alexa_changed_entity
                )
                return
    return hass.helpers.event.async_track_state_change(
        MATCH_ALL, async_entity_state_listener
    )
async def async_send_changereport_message(
    hass, config, alexa_entity, *, invalidate_access_token=True
):
    """Send a ChangeReport message for an Alexa entity.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-with-changereport-events

    On an INVALID_ACCESS_TOKEN_EXCEPTION response the access token is
    invalidated and the report is retried exactly once.
    """
    token = await config.async_get_access_token()
    headers = {"Authorization": f"Bearer {token}"}
    endpoint = alexa_entity.alexa_id()
    # this sends all the properties of the Alexa Entity, whether they have
    # changed or not. this should be improved, and properties that have not
    # changed should be moved to the 'context' object
    properties = list(alexa_entity.serialize_properties())
    payload = {
        API_CHANGE: {"cause": {"type": Cause.APP_INTERACTION}, "properties": properties}
    }
    message = AlexaResponse(name="ChangeReport", namespace="Alexa", payload=payload)
    message.set_endpoint_full(token, endpoint)
    message_serialized = message.serialize()
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    try:
        with async_timeout.timeout(DEFAULT_TIMEOUT):
            response = await session.post(
                config.endpoint,
                headers=headers,
                json=message_serialized,
                allow_redirects=True,
            )
    except (asyncio.TimeoutError, aiohttp.ClientError):
        _LOGGER.error("Timeout sending report to Alexa.")
        return
    response_text = await response.text()
    _LOGGER.debug("Sent: %s", json.dumps(message_serialized))
    _LOGGER.debug("Received (%s): %s", response.status, response_text)
    # 202 Accepted: the event gateway took the report.
    if response.status == 202:
        return
    response_json = json.loads(response_text)
    # BUG FIX: retry only on the FIRST failure (invalidate_access_token is
    # True). The original tested `not invalidate_access_token`, which both
    # skipped the retry on the first attempt and recursed without bound on
    # the retry attempt.
    if (
        response_json["payload"]["code"] == "INVALID_ACCESS_TOKEN_EXCEPTION"
        and invalidate_access_token
    ):
        config.async_invalidate_access_token()
        return await async_send_changereport_message(
            hass, config, alexa_entity, invalidate_access_token=False
        )
    _LOGGER.error(
        "Error when sending ChangeReport to Alexa: %s: %s",
        response_json["payload"]["code"],
        response_json["payload"]["description"],
    )
async def async_send_add_or_update_message(hass, config, entity_ids):
    """Send an AddOrUpdateReport message for entities.
    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#add-or-update-report
    """
    token = await config.async_get_access_token()
    headers = {"Authorization": f"Bearer {token}"}
    endpoints = []
    for entity_id in entity_ids:
        # Only entities whose domain has an Alexa adapter are reported.
        domain = entity_id.partition(".")[0]
        if domain in ENTITY_ADAPTERS:
            adapter = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
            endpoints.append(adapter.serialize_discovery())
    payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
    message = AlexaResponse(
        name="AddOrUpdateReport", namespace="Alexa.Discovery", payload=payload
    )
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    # The raw aiohttp response is handed back to the caller unchanged.
    return await session.post(
        config.endpoint, headers=headers, json=message.serialize(), allow_redirects=True
    )
async def async_send_delete_message(hass, config, entity_ids):
    """Send an DeleteReport message for entities.
    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#deletereport-event
    """
    token = await config.async_get_access_token()
    headers = {"Authorization": f"Bearer {token}"}
    endpoints = []
    for entity_id in entity_ids:
        # Only entities whose domain has an Alexa adapter are reported;
        # a DeleteReport needs just the endpoint id.
        domain = entity_id.partition(".")[0]
        if domain in ENTITY_ADAPTERS:
            adapter = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
            endpoints.append({"endpointId": adapter.alexa_id()})
    payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
    message = AlexaResponse(
        name="DeleteReport", namespace="Alexa.Discovery", payload=payload
    )
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    # The raw aiohttp response is handed back to the caller unchanged.
    return await session.post(
        config.endpoint, headers=headers, json=message.serialize(), allow_redirects=True
    )
async def async_send_doorbell_event_message(hass, config, alexa_entity):
    """Send a DoorbellPress event message for an Alexa entity.
    https://developer.amazon.com/docs/smarthome/send-events-to-the-alexa-event-gateway.html
    """
    token = await config.async_get_access_token()
    headers = {"Authorization": f"Bearer {token}"}
    endpoint = alexa_entity.alexa_id()
    message = AlexaResponse(
        name="DoorbellPress",
        namespace="Alexa.DoorbellEventSource",
        payload={
            "cause": {"type": Cause.PHYSICAL_INTERACTION},
            # ISO-8601 UTC timestamp with an explicit trailing 'Z'.
            "timestamp": f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z",
        },
    )
    message.set_endpoint_full(token, endpoint)
    message_serialized = message.serialize()
    session = hass.helpers.aiohttp_client.async_get_clientsession()
    try:
        with async_timeout.timeout(DEFAULT_TIMEOUT):
            response = await session.post(
                config.endpoint,
                headers=headers,
                json=message_serialized,
                allow_redirects=True,
            )
    except (asyncio.TimeoutError, aiohttp.ClientError):
        # NOTE(review): this branch is also reached for non-timeout client
        # errors, despite the log message text.
        _LOGGER.error("Timeout sending report to Alexa.")
        return
    response_text = await response.text()
    _LOGGER.debug("Sent: %s", json.dumps(message_serialized))
    _LOGGER.debug("Received (%s): %s", response.status, response_text)
    # 202 Accepted: the event gateway took the event.
    if response.status == 202:
        return
    response_json = json.loads(response_text)
    _LOGGER.error(
        "Error when sending DoorbellPress event to Alexa: %s: %s",
        response_json["payload"]["code"],
        response_json["payload"]["description"],
    )
| {
"repo_name": "robbiet480/home-assistant",
"path": "homeassistant/components/alexa/state_report.py",
"copies": "2",
"size": "7965",
"license": "apache-2.0",
"hash": 8588217609352270000,
"line_mean": 30.4822134387,
"line_max": 129,
"alpha_frac": 0.6462021343,
"autogenerated": false,
"ratio": 3.994483450351053,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5640685584651053,
"avg_score": null,
"num_lines": null
} |
# Version field placed in Alexa custom-skill response envelopes.
ALEXA_VERSION_NUMBER = 1
# Built-in Amazon intent / request-type names.
STOP_INTENT = 'AMAZON.StopIntent'
CANCEL_INTENT = 'AMAZON.CancelIntent'
HELP_INTENT = 'AMAZON.HelpIntent'
TYPE_LAUNCH = 'LaunchRequest'
# Alexa Connected Home (v2) discovery/control identifiers.
ALEXA_DISCOVERY = 'DiscoverAppliancesRequest'
ALEXA_DISCOVERY_NAMESPACE = 'Alexa.ConnectedHome.Discovery'
ALEXA_CONTROL_NAMESPACE = 'Alexa.ConnectedHome.Control'
# Slot names used by the custom-skill intents.
STATE = 'state'
DEVICE = 'device'
LEVEL = 'level'
MODE = 'mode'
# Custom intent names defined by this skill.
SWITCH_INTENT = 'Switch'
DIMMER_INTENT = 'Dimmer'
MODE_INTENT = 'ChangeMode'
# Maps each supported custom intent to the slots it requires.
SUPPORTED_INTENTS = {
    SWITCH_INTENT: {
        'required': [STATE, DEVICE]
    },
    DIMMER_INTENT: {
        'required': [LEVEL, DEVICE]
    },
    MODE_INTENT: {
        'required': [MODE]
    }
}
def make_response(output_speech, card_content, output_type="PlainText", card_title="Firefly Smart Home",
                  card_type="Simple", end_session=True):
    """Build an Alexa custom-skill response dict.

    output_speech -- text Alexa speaks aloud
    card_content  -- body text of the companion-app card
    output_type   -- outputSpeech type (default "PlainText")
    card_title    -- title of the companion-app card
    card_type     -- card type (default "Simple")
    end_session   -- whether Alexa should close the session
    """
    speech = {
        "type": output_type,
        "text": output_speech
    }
    card = {
        "type": card_type,
        "title": card_title,
        "content": card_content
    }
    return {
        "version": ALEXA_VERSION_NUMBER,
        "response": {
            "outputSpeech": speech,
            "card": card,
            'shouldEndSession': end_session
        }
    }
# Canned skill responses, built once at import time.
STOP_RESPONSE = make_response('', 'Request Canceled')
HELP_RESPONSE = make_response(
    'With this skill you can control your firefly system. For example you can say, Alexa, tell firefly to turn off '
    'kitchen lights. What would you like me to do right now?',
    'With this skill you can control your firefly system. For example you can say, Alexa, tell firefly home to turn '
    'off kitchen lights. What would you like me to do right now?', end_session=False)
WELCOME_RESPONSE = make_response(
    'Welcome to Firefly. With this skill you can control your firefly system. For example you can say, Alexa, '
    'tell firefly to turn off kitchen lights. What would you like me to do right now?',
    'Welcome to Firefly. With this skill you can control your firefly system. For example you can say, Alexa, '
    'tell firefly home to turn off kitchen lights. What would you like me to do right now?', end_session=False)
UNSUPPORTED_COMMAND = make_response('The requested command is unsupported', 'The requested command is unsupported')
# Delegates slot elicitation back to Alexa's dialog model.
REQUEST_SLOT_FILLING = {
    "version": ALEXA_VERSION_NUMBER,
    "response": {
        "directives": [{
            "type": "Dialog.Delegate"
        }],
        "shouldEndSession": False
    },
    "sessionAttributes": {}
}
## ALEXA SMART HOME CONST

def _alexa_interface(interface, supported_property=None):
    """Build an Alexa Smart Home v3 capability dict.

    interface -- capability interface name, e.g. "Alexa.PowerController".
    supported_property -- the single reported property name; when None, the
        capability carries no "properties" section (base "Alexa" interface).

    All capabilities are retrievable but not proactively reported, exactly
    matching the previously hand-written dicts.
    """
    capability = {
        "type": "AlexaInterface",
        "interface": interface,
        "version": "3"
    }
    if supported_property is not None:
        capability["properties"] = {
            "supported": [
                {
                    "name": supported_property
                }
            ],
            "proactivelyReported": False,
            "retrievable": True
        }
    return capability

# The seven near-identical capability dicts are generated from one helper
# instead of being copy-pasted; the resulting values are unchanged.
ALEXA_INTERFACE = _alexa_interface("Alexa")
ALEXA_COLOR_TEMPERATURE_INTERFACE = _alexa_interface("Alexa.ColorTemperatureController", "colorTemperatureInKelvin")
ALEXA_COLOR_INTERFACE = _alexa_interface("Alexa.ColorController", "color")
ALEXA_BRIGHTNESS_INTERFACE = _alexa_interface("Alexa.BrightnessController", "brightness")
ALEXA_POWER_INTERFACE = _alexa_interface("Alexa.PowerController", "power")
ALEXA_POWER_LEVEL_INTERFACE = _alexa_interface("Alexa.PowerLevelController", "powerLevel")
ALEXA_PERCENTAGE_INTERFACE = _alexa_interface("Alexa.PercentageController", "percentage")
ALEXA_HEALTH_INTERFACE = _alexa_interface("Alexa.EndpointHealth", "connectivity")
ALEXA_TEMPERATURE_INTERFACE = _alexa_interface("Alexa.TemperatureSensor", "temperature")

# Alexa display categories for discovered endpoints.
ALEXA_LIGHT = 'LIGHT'
ALEXA_SMARTPLUG = 'SMARTPLUG'
ALEXA_SWITCH = 'SWITCH'
ALEXA_DOOR = 'DOOR'
ALEXA_SMARTLOCK = 'SMARTLOCK'
ALEXA_SPEAKERS = 'SPEAKERS'
ALEXA_TEMPERATURE_SENSOR = 'TEMPERATURE_SENSOR'
ALEXA_THERMOSTAT = 'THERMOSTAT'
ALEXA_TV = 'TV'
| {
"repo_name": "Firefly-Automation/Firefly",
"path": "Firefly/services/alexa/alexa_const.py",
"copies": "1",
"size": "5246",
"license": "apache-2.0",
"hash": 1948325477456009000,
"line_mean": 23.6291079812,
"line_max": 117,
"alpha_frac": 0.5947388486,
"autogenerated": false,
"ratio": 3.2124923453766074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4307231193976607,
"avg_score": null,
"num_lines": null
} |
# Alex Ciaramella and Greg Suner
# Abstract Tournament Class
# Tournament is observable while players are observers
import Message
import Observable
import Display
import ScoreKeeper
class Tournament(Observable.Observable):
    """Abstract round-based tournament with score keeping.

    The tournament is an Observable; registered players (and an optional
    display) are observers that receive Message notifications for match and
    round lifecycle events. Subclasses implement create_next_match().
    """
    # set up a list of players when tournament is initialized
    def __init__(self):
        Observable.Observable.__init__(self)
        self.playerList = []  # registered player observers
        self.game = None      # game object that scores a set of moves
        self.display = None   # optional display observer
        self.scorekeeper = ScoreKeeper.ScoreKeeper()
    def attach_display(self, display):
        """Attach a display observer that receives all notifications."""
        self.display = display
        self.add_observer(self.display)
    # Returns the players in the tournament
    def get_players(self):
        return self.playerList
    # run the tournament
    def run(self):
        """Play matches until create_next_match() returns None, then
        announce the end and print final statistics."""
        self.begin_tournament()
        while True:
            match = self.create_next_match()
            if match is None:
                break
            self.play_match(match)
        self.end_tournament()
        self.scorekeeper.print_final_stats()
    # get a reference to the next game to be played
    def create_next_match(self):
        # Abstract: subclasses return (players, rounds) or None when done.
        pass
    # register a player for the tournament by adding them to
    # the list of current players
    def register_player(self, player):
        self.playerList.append(player)
        self.add_observer(player)
    # stores a reference to the type of game we will be playing
    def set_game(self, game):
        self.game = game
    # Computes the result of a round based on the moves made by the players
    def get_result(self, moves):
        return self.game.get_result(moves)
    # play the next match and return the results
    def play_match(self, match):
        # match is a (players, rounds) pair.
        players = match[0]
        self.start_match(players)
        result = self.play_rounds(match) # play_rounds should return a value, but doesn't... TODO??
        self.end_match(players, result)
    # plays each individual game in the match
    """
    This function should return a result, but when it does return result,
    it stops the match in the preceding play_match function.
    This is likely a bug, but I haven't figured out a solution to this.
    """
    def play_rounds(self, match):
        # NOTE(review): returns None, so end_match is notified with
        # result=None — see the author's note above; left unchanged here.
        players = match[0]
        rounds = match[1]
        for i in range(rounds):
            self.start_round(players)
            moves = []
            for p in players:
                moves.append(p.play())
            result = self.get_result(moves)
            self.end_round(players, moves, result)
    # notifies players tournament has begun
    def begin_tournament(self):
        pass
    # Announces results of tournament to all players
    def end_tournament(self):
        pass
    # send a message containing a list of all the players in the current match
    def start_match(self, players):
        message = Message.Message.get_match_start_message(players)
        self.notify_all(message)
    # send a message containing the result of the match
    def end_match(self, players, result):
        message = Message.Message.get_match_end_message(players, result)
        self.notify_all(message)
    # send a message containing the players in the next game
    def start_round(self, players):
        message = Message.Message.get_round_start_message(players)
        self.notify_all(message)
    # send a message containing the players, moves, and result of the last game
    def end_round(self, players, moves, result):
        #find winner based on the largest score
        # NOTE(review): assumes exactly two players/scores — TODO confirm.
        if(result[0] == result[1]): #if tie, no winner awarded
            winner = None
        else:
            winner = players[result.index(max(result))]
        self.scorekeeper.update_tournament(players, winner, result)
        message = Message.Message.get_round_end_message(players, moves, result)
        self.notify_all(message)
| {
"repo_name": "geebzter/game-framework",
"path": "Tournament.py",
"copies": "1",
"size": "3830",
"license": "apache-2.0",
"hash": 5956504372984971000,
"line_mean": 32.3043478261,
"line_max": 100,
"alpha_frac": 0.6433420366,
"autogenerated": false,
"ratio": 4.070138150903294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5213480187503294,
"avg_score": null,
"num_lines": null
} |
# Alex Ciaramella and Greg Suner
# Abstract Tournament Class
# Tournament is observable while players are observers
import Message
import Observable
import Display
class Tournament(Observable.Observable):
    """Abstract round-based tournament.

    The tournament is an Observable; registered players (and an optional
    display) are observers that receive Message notifications for match and
    round lifecycle events. Subclasses implement create_next_match().
    """
    # set up a list of players when tournament is initialized
    def __init__(self):
        Observable.Observable.__init__(self)
        self.playerList = []  # registered player observers
        self.game = None      # game object that scores a set of moves
        # FIX: initialize display so reading it before attach_display()
        # cannot raise AttributeError.
        self.display = None
    def attach_display(self, display):
        """Attach a display observer that receives all notifications."""
        self.display = display
        self.add_observer(self.display)
    # Returns the players in the tournament
    def get_players(self):
        return self.playerList
    # run the tournament
    def run(self):
        """Play matches until create_next_match() returns None."""
        self.begin_tournament()
        # Idiom fixes: `while True` (not `while (True)`) and identity
        # comparison with None (not `== None`).
        while True:
            match = self.create_next_match()
            if match is None:
                break
            self.play_match(match)
        self.end_tournament()
    # get a reference to the next game to be played
    def create_next_match(self):
        # Abstract: subclasses return (players, rounds) or None when done.
        pass
    # register a player for the tournament by adding them to
    # the list of current players
    def register_player(self, player):
        self.playerList.append(player)
        self.add_observer(player)
    # stores a reference to the type of game we will be playing
    def set_game(self, game):
        self.game = game
    # Computes the result of a round based on the moves made by the players
    def get_result(self, moves):
        return self.game.get_result(moves)
    # play the next match and return the results
    def play_match(self, match):
        # match is a (players, rounds) pair.
        players = match[0]
        self.start_match(players)
        result = self.play_rounds(match)
        self.end_match(players, result)
    # plays each individual game in the match
    def play_rounds(self, match):
        # NOTE(review): returns None, so end_match is notified with
        # result=None; behavior kept as-is for compatibility.
        players = match[0]
        rounds = match[1]
        for i in range(rounds):
            self.start_round(players)
            moves = []
            for p in players:
                moves.append(p.play())
            result = self.get_result(moves)
            self.end_round(players, moves, result)
    # notifies players tournament has begun
    def begin_tournament(self):
        pass
    # Announces results of tournament to all players
    def end_tournament(self):
        pass
    # send a message containing a list of all the players in the current match
    def start_match(self, players):
        message = Message.Message.get_match_start_message(players)
        self.notify_all(message)
    # send a message containing the result of the match
    def end_match(self, players, result):
        message = Message.Message.get_match_end_message(players, result)
        self.notify_all(message)
    # send a message containing the players in the next game
    def start_round(self, players):
        message = Message.Message.get_round_start_message(players)
        self.notify_all(message)
    # send a message containing the players, moves, and result of the last game
    def end_round(self, players, moves, result):
        message = Message.Message.get_round_end_message(players, moves, result)
        self.notify_all(message)
| {
"repo_name": "mccler89/RPS_Player",
"path": "Tournament.py",
"copies": "3",
"size": "3118",
"license": "apache-2.0",
"hash": -2208371542740168000,
"line_mean": 29.568627451,
"line_max": 79,
"alpha_frac": 0.6423989737,
"autogenerated": false,
"ratio": 4.075816993464052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6218215967164052,
"avg_score": null,
"num_lines": null
} |
# Alex Ciaramella and Greg Suner
# Abstract Tournament Class
# Tournament is observable while players are observers
import Message
import Observable
import Display
class Tournament(Observable.Observable):
    """Abstract round-based tournament.

    The tournament is an Observable; registered players (and an optional
    display) are observers that receive Message notifications for match and
    round lifecycle events. Subclasses implement create_next_match().
    """
    # set up a list of players when tournament is initialized
    def __init__(self):
        Observable.Observable.__init__(self)
        self.playerList = []  # registered player observers
        self.game = None      # game object that scores a set of moves
        self.display = None   # optional display observer
    def attach_display(self, display):
        """Attach a display observer that receives all notifications."""
        self.display = display
        self.add_observer(self.display)
    # Returns the players in the tournament
    def get_players(self):
        return self.playerList
    # run the tournament
    def run(self):
        """Play matches until create_next_match() returns None."""
        self.begin_tournament()
        while True:
            match = self.create_next_match()
            if match is None:
                break
            self.play_match(match)
        self.end_tournament()
    # get a reference to the next game to be played
    def create_next_match(self):
        # Abstract: subclasses return (players, rounds) or None when done.
        pass
    # register a player for the tournament by adding them to
    # the list of current players
    def register_player(self, player):
        self.playerList.append(player)
        self.add_observer(player)
    # stores a reference to the type of game we will be playing
    def set_game(self, game):
        self.game = game
    # Computes the result of a round based on the moves made by the players
    def get_result(self, moves):
        return self.game.get_result(moves)
    # play the next match and return the results
    def play_match(self, match):
        # match is a (players, rounds) pair.
        players = match[0]
        self.start_match(players)
        result = self.play_rounds(match) # play_rounds should return a value, but doesn't... TODO??
        self.end_match(players, result)
    # plays each individual game in the match
    """
    This function should return a result, but when it does return result,
    it stops the match in the preceding play_match function.
    This is likely a bug, but I haven't figured out a solution to this.
    """
    def play_rounds(self, match):
        # NOTE(review): returns None, so end_match is notified with
        # result=None — see the author's note above; left unchanged here.
        players = match[0]
        rounds = match[1]
        for i in range(rounds):
            self.start_round(players)
            moves = []
            for p in players:
                moves.append(p.play())
            result = self.get_result(moves)
            self.end_round(players, moves, result)
    # notifies players tournament has begun
    def begin_tournament(self):
        pass
    # Announces results of tournament to all players
    def end_tournament(self):
        pass
    # send a message containing a list of all the players in the current match
    def start_match(self, players):
        message = Message.Message.get_match_start_message(players)
        self.notify_all(message)
    # send a message containing the result of the match
    def end_match(self, players, result):
        message = Message.Message.get_match_end_message(players, result)
        self.notify_all(message)
    # send a message containing the players in the next game
    def start_round(self, players):
        message = Message.Message.get_round_start_message(players)
        self.notify_all(message)
    # send a message containing the players, moves, and result of the last game
    def end_round(self, players, moves, result):
        message = Message.Message.get_round_end_message(players, moves, result)
        self.notify_all(message)
| {
"repo_name": "PaulieC/sprint1_Council_a",
"path": "Tournament.py",
"copies": "2",
"size": "3437",
"license": "apache-2.0",
"hash": 5243066021880614000,
"line_mean": 31.7333333333,
"line_max": 100,
"alpha_frac": 0.6444573756,
"autogenerated": false,
"ratio": 4.116167664670659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000942951438000943,
"num_lines": 105
} |
# A lexer in Python
# By Anthony Nguyen
# MIT Licensed
#
# Pretty basic; easily extensible
import re
class Token():
    """A single lexeme matched by the Scanner.

    Records the rule name, the rule's pattern, the matched text, and the
    match's [start, end) offsets within the scanned data.
    """
    def __init__(self, name, rule, data, start, end):
        self.name, self.rule, self.data = name, rule, data
        self.start, self.end = start, end
    def __str__(self):
        return "%s(%s): %s" % (self.name, self.rule, self.data)
class Scanner():
    """Iterator that yields Token objects from `data` using the rule dict
    `tokens` (name -> regex pattern).

    NOTE(review): implements the Python 2 iterator protocol (`next`); a
    `__next__` alias would be needed under Python 3 — TODO confirm target
    version.
    """
    def __init__(self, data, tokens, flags, ignoreWhitespace):
        self.data = data
        self.tokens = tokens
        # Combine all rules into one master regex with a named group per rule.
        r = []
        for name in tokens:
            r.append("(?P<{0}>{1})".format(name, tokens[name]))
        self.regex = re.compile("|".join(r), *flags)
        self.ignoreWhitespace = ignoreWhitespace
        self.whitespace = re.compile("\s*", re.MULTILINE)
        self.position = 0  # current scan offset into data
    def __iter__(self):
        return self
    def next(self):
        # Optionally skip leading whitespace before attempting a match.
        if self.ignoreWhitespace:
            whitespace = self.whitespace.match(self.data, pos = self.position)
            if whitespace is not None:
                self.position = whitespace.end()
        match = self.regex.match(self.data, pos = self.position)
        if match is not None:
            self.position = match.end()
            return Token(match.lastgroup, self.tokens[match.lastgroup], match.group(match.lastgroup), match.start(), match.end())
        else:
            # No rule matches here (including end of input): stop iterating.
            raise StopIteration
class Lexer():
    """Holds token definitions and produces Scanner instances for input data."""
    def __init__(self, tokens, *flags):
        # FIX: `tokens` used to be a mutable CLASS attribute that __init__
        # mutated, so every Lexer instance shared one rule dict; keep the
        # rule dict per-instance instead.
        self.tokens = {}
        for name, rule in tokens:
            self.tokens[name] = rule
        self.flags = list(flags)
    def addTokens(self, tokens):
        """Add (name, rule) pairs to this lexer's rule set."""
        for name, rule in tokens:
            self.tokens[name] = rule
    def addFlags(self, *flags):
        """Append extra re flags used when compiling the master regex."""
        self.flags += list(flags)
    def scan(self, data, ignoreWhitespace = False):
        """Return a Scanner iterating over the tokens of `data`."""
        return Scanner(data, self.tokens, self.flags, ignoreWhitespace)
# Demo: tokenize a small sample string when run directly as a script.
# (Python 2 print statement; the trailing "| {" on the last line is
# residue from the dataset join, reproduced unchanged.)
if __name__ == "__main__":
    lexer = Lexer([
        ("word", "[a-z]+"),
        ("shortdate", "\d{1,2}\/\d{1,2}\/\d{4}")
    ], re.I)
    for token in lexer.scan("I was born on", True):
        print token | {
"repo_name": "anthonynguyen/pylexer",
"path": "pylexer.py",
"copies": "1",
"size": "1796",
"license": "mit",
"hash": 196452366284821380,
"line_mean": 24.6714285714,
"line_max": 120,
"alpha_frac": 0.6542316258,
"autogenerated": false,
"ratio": 2.963696369636964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.893040099726889,
"avg_score": 0.0375053996336148,
"num_lines": 70
} |
# A lexer used by all the parsers
# A simple lexer for C like expressions. The major difference with C is that consecutive operators have
# to be separated with white spaces.
import re
class Token:
    """One lexical token: a kind tag plus the matched text (lexem)."""
    def __init__(self, kind, lexem):
        self.kind = kind
        self.lexem = lexem
    def __repr__(self):
        return "<Token %s '%s'>" % (self.kind, self.lexem)
def tokenize(code):
    """Yield Token objects for a C-like expression string.

    Whitespace is skipped; any character matching no rule raises
    RuntimeError.
    """
    specification = [
        ('NUMBER', r'\d+(\.\d*)?'),
        ('ID', r'[A-Za-z_][A-Za-z0-9_]*'),
        ('OPER', r'[-~+*/%=<>?!:|&^@]+'),
        ('SYNT', r'[][(),.]'),
        ('SKIP', r'[ \t\n]+'),
        ('MISMATCH', r'.'),
    ]
    # One master regex with a named group per token class; first match wins.
    master = '|'.join('(?P<%s>%s)' % entry for entry in specification)
    for match in re.finditer(master, code):
        kind = match.lastgroup
        lexem = match.group(kind)
        if kind == 'MISMATCH':
            raise RuntimeError('{} unexpected'.format(lexem))
        if kind != 'SKIP':
            yield Token(kind, lexem)
| {
"repo_name": "bourguet/operator_precedence_parsing",
"path": "lexer.py",
"copies": "1",
"size": "1039",
"license": "bsd-2-clause",
"hash": -4707189783009254000,
"line_mean": 28.6857142857,
"line_max": 104,
"alpha_frac": 0.5043310876,
"autogenerated": false,
"ratio": 3.4177631578947367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44220942454947365,
"avg_score": null,
"num_lines": null
} |
'''Alexey is trying to develop a program for a very simple microcontroller. It makes readings from various sensors over time, and these readings must happen at specific regular times. Unfortunately, if two of these readings occur at the same time, the microcontroller freezes and must be reset.
There are N different sensors that read data on a regular basis. For each i from 1 to N, the reading from sensor i will occur every Ai milliseconds with the first reading occurring exactly Ai milliseconds after the microcontroller is powered up. Each reading takes precisely one millisecond on Alexey's microcontroller.
Alexey wants to know when the microcontroller will freeze after he turns it on.
Input
The first line of the input contains an integer T denoting the number of test cases. The description of T test cases follows.
The first line contains single integer N denoting the number of sensors.
The second line contains N space-separated integers A1, A2, ..., AN denoting frequency of measurements. Namely, sensor i will be read every Ai milliseconds with the first reading occurring Ai milliseconds after the microcontroller is first turned on.
Output
For each test case, output a single line containing the number of milliseconds until the microcontroller freezes.
Constraints
1 ≤ T ≤ 10
2 ≤ N ≤ 500
1 ≤ Ai ≤ 10^9
Subtasks
Subtask #1 (10 points) 1 ≤ T ≤ 10, 2 ≤ N ≤ 9, 1 ≤ Ai ≤ 500
Subtask #2 (20 points) 1 ≤ T ≤ 10, 2 ≤ N ≤ 500, 1 ≤ Ai ≤ 1000
Subtask #3 (70 points) original constraints
Example
Input:
3
3
2 3 5
4
1 8 7 11
4
4 4 5 6
Output:
6
7
4
Explanation
Case 1: in 6 milliseconds, the third reading will be attempted from the 1st sensor and the second reading will be attempted from the 2nd sensor.
Case 2: in 7 milliseconds the seventh reading will be attempted from the 1st sensor and the first reading will be attempted from the 3rd sensor.
Case 3: in 4 milliseconds, the first readings from the first two sensors will be attempted.
'''
def LCM(x, y):
    """Return the least common multiple of two positive integers.

    Replaces the original brute-force scan over the multiples of x
    (O(lcm/x) iterations, far too slow for Ai up to 10^9) with Euclid's
    gcd algorithm (O(log min(x, y))); results are identical for positive
    inputs.
    """
    a, b = x, y
    while b:
        a, b = b, a % b
    # lcm(x, y) = x * y / gcd(x, y); floor division keeps the result an int.
    return x * y // a
T=input()
for t in range(T):
n=input()
f=raw_input()
b=''
mind=100000000000
for i in range(len(f)):
if f[i]!=' ':
b+=f[i]
if len(b)==n:
for i in list(b):
for j in list(b):
if list(b).index(i)!=list(b).index(j):
d=LCM(int(i),int(j))
if d<mind:
mind=d
print mind
| {
"repo_name": "OMEHA/HELLO-WORLD",
"path": "PYTHON-CODECHEF_ALEXTASK.py",
"copies": "1",
"size": "2580",
"license": "unlicense",
"hash": -8358206587142630000,
"line_mean": 29.8518518519,
"line_max": 319,
"alpha_frac": 0.665503876,
"autogenerated": false,
"ratio": 3.649222065063649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4814725941063649,
"avg_score": null,
"num_lines": null
} |
# Alex Goudine
# GEOG 490 - Webscraping and Database Design
# Scrapes weather data from forecast.io and returns a dict of the relevant information
# Modified by Taylor Denouden
# Shortened script and made into a simple function in which geom and date data can be passed
# Added more efficient and robust cardinal direction lookup
# Removed test for unlimited visibility to maintain data type consistency in database
# Removed test to see if rider was travelling in the same direction in favor of storing the cardinal wind direction only
# Updated docstring
import urllib.request
import json
from datetime import datetime
import time
from django.conf import settings
def get_weather(coords, date):
    """Generate a dict of weather data for a location at a given time.

    Keyword arguments:
    coords -- decimal degree coordinates of location. Format is [longitude, latitude]
    date -- a python datetime object
    """
    (lng, lat) = coords
    DIRECTIONS = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"]
    # A call is made to the forecast.io API using the provided key.
    APIkey = settings.FORECAST_IO_API_KEY
    physicalURL = "https://api.forecast.io/forecast/"+APIkey+"/"+str(lat)+","+str(lng)+","+datetime.isoformat(date)+"?units=ca"
    response = json.loads(urllib.request.urlopen(physicalURL).read())
    c = response['currently']
    d = response['daily']['data'][0]
    sunrise = d.get('sunriseTime', None)
    sunset = d.get('sunsetTime', None)
    # Unix timestamp of the requested moment, computed once.
    when = time.mktime(date.timetuple())
    # FIX: windBearing can legitimately be 0 (due north); test for presence
    # explicitly instead of truthiness so north winds get a direction string.
    bearing = c.get('windBearing')
    return {
        'summary': c.get('summary', ''),
        'sunrise_time': datetime.utcfromtimestamp(sunrise + response['offset']*60*60) if sunrise else None,
        'sunset_time': datetime.utcfromtimestamp(sunset + response['offset']*60*60) if sunset else None,
        # Dawn: within the 30 minutes before sunrise.
        'dawn': (sunrise-30*60 <= when <= sunrise) if sunrise else False,
        # FIX: dusk is the 30 minutes after SUNSET; the original compared the
        # time against sunrise+30*60 and guarded on sunrise.
        'dusk': (sunset <= when <= sunset+30*60) if sunset else False,
        'precip_intensity': c.get('precipIntensity', -1),
        'precip_probability': c.get('precipProbability', -1),
        'precip_type': c.get('precipType', ""),
        'temperature': c.get('temperature', -1),
        'black_ice_risk': c.get('temperature', 100) <= -18 or (c.get('precipIntensity', -1) > 0 and c.get('temperature', 100) <= 0),
        'wind_speed': c.get('windSpeed', -1),
        'wind_bearing': c.get('windBearing', -1),
        'wind_bearing_str': DIRECTIONS[int((bearing + 22.5) // 45 % 8)] if bearing is not None else "",
        'visibility_km': c.get('visibility', -1),  # if visibilityKM == 16.09 it is unlimited
    }
| {
"repo_name": "SPARLab/BikeMaps",
"path": "mapApp/utils/weather.py",
"copies": "1",
"size": "2587",
"license": "mit",
"hash": -2301689683614506500,
"line_mean": 46.9074074074,
"line_max": 132,
"alpha_frac": 0.6671820642,
"autogenerated": false,
"ratio": 3.500676589986468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46678586541864675,
"avg_score": null,
"num_lines": null
} |
# Alex Gould
# 9/26/15
# COMP50CP
# transform_image.py - Uses multithreading to concurrently alter
# sections of an image.
# Since the threads never share resources, there's no real need to
# coordinate them, apart from making sure they're all done
# before continuing with the program
from threading import Thread
import sys
from PIL import Image
global MAX_THREADS
def switch_r_b(rgb):
    """Swap the red and blue channels of an (r, g, b) pixel tuple."""
    # Tuple-parameter unpacking in the signature was removed in Python 3
    # (PEP 3113); unpack inside the body so call sites are unchanged.
    r, g, b = rgb
    return (b, g, r)
def negate(rgb):
    """Invert each channel of an (r, g, b) pixel tuple (photo negative)."""
    # Tuple parameters were removed in Python 3 (PEP 3113); unpack in the
    # body so the single-tuple call sites are unchanged.
    r, g, b = rgb
    return (255-r, 255-g, 255-b)
def greenonly(rgb):
    """Replace the pixel with pure green; the input channels are ignored."""
    # Tuple parameters were removed in Python 3 (PEP 3113); take the tuple
    # as a single argument so call sites are unchanged.
    return (0, 255, 0)
tfDict = {"switch-r-b":switch_r_b, "negate":negate, "green":greenonly}
def transformSection(pixels, tf, x, width, y, height):
    """Apply the per-pixel transform `tf` to the width x height region
    whose top-left corner is (x, y) in the pixel-access object `pixels`.

    Coordinates past the image edge end that column early, so sections at
    the right/bottom border are simply clipped rather than raising.
    """
    for col in range(x, x + width):
        for row in range(y, y + height):
            try:
                pixels[col, row] = tf(pixels[col, row])
            except (IndexError, KeyError):
                # Narrowed from a bare except: only out-of-range access is
                # expected here (PIL raises IndexError); a bug inside `tf`
                # should surface instead of being silently swallowed.
                break
def threading(infile, outfile, tf, numrows, numcols):
    """Split the image in `infile` into a grid of numrows x numcols
    sections, transform each section with `tf` on its own thread, and
    save the result to `outfile`.

    The sections share no pixels, so no locking is needed; the threads
    are only joined before saving.
    """
    pic = Image.open(infile)
    width, height = pic.size
    # Never create more sections than there are pixels along an axis.
    if numcols > width:
        numcols = width
    if numrows > height:
        numrows = height
    # Floor division keeps section sizes integral under Python 3 as well;
    # under Python 2, int/int already floored.
    colwidth = width // numcols
    rowheight = height // numrows
    # Top-left corner of every section, column by column.
    xcoords = []
    ycoords = []
    xcount = 0
    while xcount < width:
        ycount = 0
        while ycount < height:
            xcoords.append(xcount)
            ycoords.append(ycount)
            ycount = ycount + rowheight
        xcount = xcount + colwidth
    pixels = pic.load()
    threads = []
    for i in range(len(xcoords)):
        threads.append(Thread(target=transformSection,
                              args=(pixels, tf, xcoords[i], colwidth,
                                    ycoords[i], rowheight)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    pic.save(outfile)
def main(argv):
    """Entry point: parse argv and run the threaded image transform.

    argv: [prog, infile, outfile, transform_name[, rows, cols]]
    """
    # Upper bound on rows*cols sections (one thread per section).
    # NOTE(review): this is a local assignment, so the module-level
    # ``global MAX_THREADS`` declaration has no effect.
    MAX_THREADS = 200
    if len(argv) != 4 and len(argv) != 6:
        sys.stderr.write("Wrong number of arguments!\n");
        sys.exit(1)
    infile = argv[1]
    outfile = argv[2]
    func = argv[3]  # key into tfDict: "switch-r-b", "negate" or "green"
    if len(argv) == 6:
        rows = int(argv[4])
        cols = int(argv[5])
    else:
        # Default grid: 2x2 sections (4 threads).
        rows = 2
        cols = 2
    if rows*cols > MAX_THREADS:
        sys.stderr.write("Too many threads!\n")
        sys.exit(1)
    threading(infile, outfile, tfDict[func], rows, cols)
if __name__ == '__main__':
main(sys.argv)
| {
"repo_name": "lyra833/Stufts2",
"path": "Concurrent Systems/Homework/HW2/image_transform.py",
"copies": "1",
"size": "2292",
"license": "mit",
"hash": -7288887728823608000,
"line_mean": 21.6930693069,
"line_max": 70,
"alpha_frac": 0.6544502618,
"autogenerated": false,
"ratio": 2.628440366972477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37828906287724773,
"avg_score": null,
"num_lines": null
} |
#Alex Holcombe alex.holcombe@sydney.edu.au
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os#, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
wordEccentricity=3 #means degrees of angle away from fixation
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.; #100
if demo:
refreshRate = 60.; #100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 24
wordsUnparsed="the, and, for, you, say, but, his, not, she, can, who, get, her, all, one, out, see, him, now, how, its, our, two, way" #24 most common words
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
wordList = wordsUnparsed.split(",") #split into list
for i in range(len(wordList)):
wordList[i] = wordList[i].replace(" ", "") #delete spaces
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=1 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=0
framesSaved=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg #I've changed the SOA and letterDurMs
SOAms = 180 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 400 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
    """Open (or reopen) the PsychoPy stimulus window using the module-level
    display settings (mon, widthPix, heightPix, allowGUI, units, bgColor,
    fullscr, scrn, waitBlank) so every reopen is configured identically."""
    myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
    return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 5 #default value
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
print('staircaseTrials entered by user=',staircaseTrials)
logging.info('staircaseTrials entered by user=',staircaseTrials)
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
else: #not doing staircase
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition =',trialsPerCondition)
defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
wordListThisTrial = list() #to put in KiuYan stuff
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(thisTrial,wordList): #called before each trial
    """Prepare the word stimuli for one trial.

    Copies wordList, appends the condition-specific (orthographically
    similar or dissimilar) words for the left and right streams, creates
    a TextStim for each word in each stream, and returns the two index
    sequences (into wordListThisTrial) ordering the left (idxsStream1)
    and right (idxsStream2) RSVP streams.
    Relies on globals myWin, ltrHeight, letterColor, wordEccentricity,
    numWordsInStream, autoLogging.
    """
    global wordListThisTrial, textStimuliStream1, textStimuliStream2
    textStimuliStream1 = list()
    textStimuliStream2 = list()
    if len(wordList) < numWordsInStream:
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    idxsIntoWordList = np.arange( len(wordList) ) #indexes of the entire word list, e.g. 0..23
    print('wordList=',wordList)
    #the two candidate word sets for each orthographic-similarity condition
    orthographicallySimilarLists = [['bed','now','ban','tap','ton','dab','paw','map','win'],['top','bib','ten','box','wet','man','urn','pit','but']]
    orthographicallyDissimilarLists = [['off','egg','her','for','elk','joy','zoo','fax','yes'],['sky','fox','fig','elf','ski','zig','cog','woo','his']]
    print('thisTrial[orthographicalsim]= ',thisTrial['orthographicalsim'])
    # BUGFIX: np.random.randint's upper bound is exclusive, so the old
    # randint(0, 1) always returned 0 and the second list of each pair was
    # never used. randint(0, 2) picks 0 or 1 with equal probability.
    whichList = np.random.randint(0, 2)
    if thisTrial['orthographicalsim'] == True:
        listForThisCondition = copy.deepcopy(orthographicallySimilarLists[whichList])
    elif thisTrial['orthographicalsim'] == False:
        listForThisCondition = copy.deepcopy(orthographicallyDissimilarLists[whichList])
        print('Using dissimilar list')
    listForThisConditionLeft = copy.deepcopy(listForThisCondition)
    listForThisConditionRight = copy.deepcopy(listForThisCondition)
    np.random.shuffle(listForThisConditionLeft)
    np.random.shuffle(listForThisConditionRight)
    print('listForThisConditionLeft=',listForThisConditionLeft)
    print('length of wordList before kiuyan modification=',len(wordList))
    wordListThisTrial = copy.deepcopy(wordList) #to put in KiuYan stuff
    for word in listForThisConditionLeft:
        print('adding left intron to position ',len(wordListThisTrial), ' in wordListThisTrial')
        wordListThisTrial.append(word) #add new words to end of wordList
    for word in listForThisConditionRight: #after the left ones, add the right-stream words too
        print('adding right intron to position ',len(wordListThisTrial), ' in wordListThisTrial')
        wordListThisTrial.append(word) #add new words to end of wordList
    numWordsToLoadUp = numWordsInStream + 2*len(listForThisCondition)
    print('numWordsToLoadUp=',numWordsToLoadUp, ' length of wordListThisTrial=',len(wordListThisTrial))
    for i in range(0,numWordsToLoadUp): #pre-create the text stimuli used on this trial
        word = wordListThisTrial[ i ]
        #bucket of words on left stream
        textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        #bucket of words on right stream
        textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-wordEccentricity,0]) #left stream
        textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
        textStimulusStream2.setPos([wordEccentricity,0]) #right stream
        textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli
    print('Kiuyan modified wordListThisTrial =',wordListThisTrial)
    idxsStream1 = idxsIntoWordList #first RSVP stream
    np.random.shuffle(idxsStream1)
    idxsStream2 = copy.deepcopy(idxsIntoWordList)
    np.random.shuffle(idxsStream2)
    # list() so the in-place shuffle also works under Python 3, where
    # range() no longer returns a list (behavior identical under Python 2).
    toSubstituteOnLeft = list(range(9))
    np.random.shuffle(toSubstituteOnLeft)
    toSubstituteOnRight = list(range(9))
    np.random.shuffle(toSubstituteOnRight)
    for i in range(9):
        idxOfWordToSubstituteLeft = numWordsInStream+toSubstituteOnLeft[i]
        #print('Using # ',idxOfWordToSubstituteLeft,' as intron for left')
        idxsStream1[i+8] = idxOfWordToSubstituteLeft
        idxOfWordToSubstituteRight = numWordsInStream + 9 + toSubstituteOnRight[i]
        print('Using # ',idxOfWordToSubstituteRight,' as intron for right')
        idxsStream2[i+8] = idxOfWordToSubstituteRight
    print('idxsStream1=',idxsStream1, ' idxsStream2=',idxsStream2)
    return idxsStream1, idxsStream2
#create click sound for keyboard
# Feedback click for keypresses; falls back to a synthesized tone if the
# .wav file is missing.
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
    fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
# NOTE(review): fixSizePix/4 is float division under Python 3, which
# np.random.rand would reject; this line relies on Python 2 integer division.
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=4,units='pix',autoLog=autoLogging)
# Text stimuli used on the response screen.
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS
# Factorial crossing of cue position x response order x flips x
# orthographic similarity; each combination becomes one dict in stimList.
cuePositions = np.array([10, 11, 12, 13, 14]) #changed this experiment from 6-10 to 10-14 so that its centred in the stream
for cuePos in cuePositions:
    for rightResponseFirst in [False,True]:
        for bothWordsFlipped in [True]:
            for orthographicalsim in [True,False]:
                stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst, 'flipHoriz':bothWordsFlipped, 'flipVert':bothWordsFlipped,
                                  'leftStreamFlip':bothWordsFlipped, 'orthographicalsim':orthographicalsim, 'rightStreamFlip':bothWordsFlipped} ) # add 'orthographicalsim':orthographical similarity
trials = data.TrialHandler(stimList,trialsPerCondition,method='sequential') #'random' #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
    ' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
    """Map 0-25 to the capital letters 'A'-'Z'; any other value gives '@'."""
    if 0 <= number <= 25:
        try:
            return chr(ord('A') + number)
        except:
            return '@'
    # out of the alphabet's range
    return '@'
def letterToNumber(letter): #A = 0, Z = 25
    """Map 'A'-'Z' to 0-25; anything outside that range (or anything that
    cannot be compared/converted) yields -999."""
    try:
        if 'A' <= letter <= 'Z':
            return ord(letter) - ord('A')
        return -999
    except:
        # e.g. multi-character strings reach ord() and raise TypeError
        return -999
def wordToIdx(word,wordList):
    """Return the index of the first entry of wordList whose upper-cased
    form equals `word`, or None when there is no match (or lookup fails)."""
    try:
        for position, candidate in enumerate(wordList):
            if candidate.upper() == word:
                return position
        return None
    except:
        print('Unexpected error in wordToIdx with word=',word)
        return (None)
#print header for data file
# Column-header row of the tab-delimited data file; the order here must
# match the per-trial write order in the scoring code.
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\tflipHoriz\tflipVert\torthographicalsim\t',end='',file=dataFile)#added tabs for the two flips
if task=='T1':
    numRespsWanted = 2
    dataFile.write('rightResponseFirst\t')
    for i in range(numRespsWanted):
        dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
        dataFile.write('answer'+str(i)+'\t')
        dataFile.write('response'+str(i)+'\t')
        dataFile.write('correct'+str(i)+'\t')
        dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
        noise,proportnNoise,allFieldCoords,numNoiseDots ):
    """Draw frame n of the trial: the current word of each RSVP stream,
    the cue circle, and (optionally) the noise dots.

    seq1/seq2 are index sequences into the pre-drawn text stimuli for the
    left and right streams. Returns True (frame handled).
    """
    SOAframes = letterDurFrames+ISIframes
    cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
    stimN = int( np.floor(n/SOAframes) )
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
    thisStimIdx = seq1[stimN] #which word should be shown this SOA
    if seq2 is not None:
        thisStim2Idx = seq2[stimN]
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    cue.setLineColor( bgColor )
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #check whether it's time for any cue
        if n>=cueFrame and n<cueFrame+cueDurFrames:
            cue.setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
        textStimuliStream2[thisStim2Idx].setColor( letterColor )
    else:
        textStimuliStream1[thisStimIdx].setColor( bgColor )
        textStimuliStream2[thisStim2Idx].setColor( bgColor )
    textStimuliStream1[thisStimIdx].flipVert = True #added this and the next line in to try to invert
    textStimuliStream2[thisStim2Idx].flipVert = True
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    textStimuliStream2[thisStim2Idx].draw()
    cue.draw()
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0:
            # BUGFIX: np.random.shuffle takes a single sequence argument;
            # the previous call passed nine separate ints and would raise
            # TypeError if this (currently disabled) branch were enabled.
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0:
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
# The cue: a circle kept in the background colour until oneFrameOfStim
# switches its line colour to cueColor on the cued frames.
cue = visual.Circle(myWin,
    radius=cueRadius,#Martini used circles with diameter of 12 deg
    lineColorSpace = 'rgb',
    lineColor=bgColor,
    lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
    units = 'deg',
    fillColorSpace = 'rgb',
    fillColor=None, #beware, with convex shapes fill colors don't work
    pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
    interpolate=True,
    autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Check the frame-time record of one trial for skipped refreshes.

    ts: list of clock times recorded after each frame flip.
    trialN: trial number, used only in the log message.
    Returns the number of inter-frame intervals that exceeded the expected
    frame duration by more than frameTimeTolerance.
    Relies on globals refreshRate, demo and the psychopy logging module.
    """
    interframeIntervs = np.diff(ts)*1000
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded tolerance of expected duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong >0 and (not demo):
        # (An old inner `if demo:` branch here was unreachable because of
        # the `not demo` guard above; it has been removed.)
        longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
        longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
                str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
        logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
        # Also log (as INFO, so it won't flood the console at WARNING or
        # higher) the interval before and after each long frame.
        flankingAlso=list()
        for idx in idxsInterframeLong:
            if idx-1>=0:
                flankingAlso.append(idx-1)
            else: flankingAlso.append(np.nan)
            flankingAlso.append(idx)
            if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
            else: flankingAlso.append(np.nan)
        flankingAlso = np.array(flankingAlso)
        # BUGFIX: np.negative on a boolean array raises TypeError on
        # modern NumPy; np.logical_not is the intended NaN-removal mask.
        flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
        flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts
        logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) )
    return numCasesInterframeLong
#end timing check
#end timing check
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0; #this is not recorded in the datafile as a variable. #added Trials into the string to define hopefully!
numTrialsEachCorrect= np.zeros( numRespsWanted )
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
    """Run the stimulus portion of one trial: a fixation interval followed
    by the two RSVP word streams with cue(s), drawn frame by frame.

    Returns (cuesPos, correctAnswerIdxsStream1, correctAnswerIdxsStream2, ts)
    where ts is the list of per-frame times used by timingCheckAndLog.
    """
    #relies on global variables:
    #   textStimuli, logging, bgColor
    #  thisTrial should have 'cuePos'
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesPos = [] #will contain the positions in the stream of all the cues (targets)
    cuesPos.append(thisTrial['cuePos'])
    cuesPos = np.array(cuesPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    cue.setLineColor(bgColor)
    preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20): #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        # fixation.draw()
        #else: fixationBlank.draw()
        fixationPoint.draw()
        myWin.flip() #end fixation interval
    #myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
            noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0; ts.append(t);
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    if task=='T1':
        respPromptStim.setText('What was circled?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
    correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
    #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
    return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx):
    """Score one collected response against the stimulus stream and write the
    per-response fields to dataFile.

    passThisTrial: True if the participant passed on this trial.
    response: sequence of characters typed by the participant.
    responseAutopilot: substitute response used when autopilot/pass is active.
    stimSequence: the index sequence shown in this stream.
    cuePos: serial position of the cue in the stream.
    correctAnswerIdx: index into wordListThisTrial of the correct word.

    Returns (correct, approxCorrect, responsePosRelative). Uses the globals
    autopilot, wordListThisTrial, dataFile, and the helper wordToIdx.
    Writes answerPos, answer, response, correct, responsePosRelative
    (tab-separated, no newline) to dataFile.
    """
    if autopilot or passThisTrial:
        response = responseAutopilot
    #Defaults for the "response not found in stream" case.
    correct = 0
    approxCorrect = 0
    posOfResponse = -999
    responsePosRelative = -999
    correctAnswer = wordListThisTrial[correctAnswerIdx].upper()
    #Collapse the typed characters into a single uppercase string.
    responseString = ''.join(str(ch) for ch in response).upper()
    if responseString == correctAnswer:
        correct = 1
    responseWordIdx = wordToIdx(responseString,wordListThisTrial)
    if responseWordIdx is None: #typed word is not in the word list at all
        logging.warn('Response was not present in the stimulus stream')
    else:
        #Locate where in the stream the reported word occurred.
        matchPositions = np.where( responseWordIdx==stimSequence )[0]
        if len(matchPositions) > 1:
            logging.error('Expected response to have occurred in only one position in stream')
        posOfResponse = matchPositions[0]
        responsePosRelative = posOfResponse - cuePos
        #Vul-style efficacy measure: within +/-3 serial positions of the cue.
        approxCorrect = abs(responsePosRelative)<= 3
    #Write this response's fields to the data file (tab-separated, same line).
    print(cuePos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
    return correct,approxCorrect,responsePosRelative
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Auditory feedback: high G for a correct trial, low F for incorrect,
    and two alternating high/low beeps when the participant passed."""
    toneHigh = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
    toneLow = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
    toneHigh.setVolume(0.9)
    toneLow.setVolume(1.0)
    if correct:
        toneHigh.play()
    elif passThisTrial:
        #A shorter, mid-octave beep distinguishes a pass from a plain correct.
        passBeep = sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            passBeep.play()
            toneLow.play()
    else:
        #incorrect
        toneLow.play()
#========== STAIRCASE PHASE (runs only when the user selected "Do staircase") ==========
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
    #create the staircase handler
    useQuest = True
    if useQuest:
        staircase = data.QuestHandler(startVal = 95,
            startValSd = 80,
            stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
            nTrials = staircaseTrials,
            #extraInfo = thisInfo,
            pThreshold = threshCriterion, #0.25,
            gamma = 1./26,
            delta=0.02, #lapse rate, I suppose for Weibull function fit
            method = 'quantile', #uses the median of the posterior as the final answer
            stepType = 'log',  #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
            minVal=1, maxVal = 100
            )
        print('created QUEST staircase')
    else:
        stepSizesLinear = [.2,.2,.1,.1,.05,.05]
        stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
        staircase = data.StairHandler(startVal = 0.1,
            stepType = 'log', #if log, what do I want to multiply it by
            stepSizes = stepSizesLog,    #step size to use after each reversal
            minVal=0, maxVal=1,
            nUp=1, nDown=3,  #will home in on the 80% threshold
            nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
            nTrials=1)
        print('created conventional staircase')
    if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
        prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
    prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
    phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
    print(phasesMsg); logging.info(phasesMsg)
    #staircaseStarterNoise PHASE OF EXPERIMENT
    corrEachTrial = list() #only needed for easyStaircaseStarterNoise
    staircaseTrialN = -1; mainStaircaseGoing = False
    while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
        if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
            staircaseTrialN += 1
            noisePercent = prefaceStaircaseNoise[staircaseTrialN]
        else:
            if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
                mainStaircaseGoing = True
                print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
                staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
            try: #advance the staircase
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
                noisePercent = 100. - staircase.next()  #will step through the staircase, based on whether told it (addResponse) got it right or wrong
                staircaseTrialN += 1
            except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
                print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
                break #break out of the trials loop
        #print('staircaseTrialN=',staircaseTrialN)
        #NOTE(review): calcAndPredrawStimuli is called here with ONE argument
        #but with TWO in the main loop below — confirm against its definition.
        idxsStream1, idxsStream2 = calcAndPredrawStimuli(wordList)
        #NOTE(review): leftover debug loop — prints 8..15 and the bare
        #expression 'idxsStream1' has no effect; candidate for removal.
        for i in range(8,16): #make sure each position points to one of the orthographically dis/similar one
            print(i)#
            idxsStream1
        #NOTE(review): 'cuePos' is not defined in this scope, and do_RSVP_stim
        #expects a trial dict as its first argument — this branch will raise
        #NameError if run; the staircase path appears stale.
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts  = \
            do_RSVP_stim(cuePos, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
        numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        expStop,passThisTrial,responses,responsesAutopilot = \
                stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
                                                                               requireAcceptance,autopilot,responseDebug=True)
        if not expStop:
            if mainStaircaseGoing:
                print('staircase\t', end='', file=dataFile)
            else:
                print('staircase_preface\t', end='', file=dataFile)
            #header start      'trialnum\tsubject\ttask\t'
            print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
            print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            #NOTE(review): 'responseAutopilot', 'sequenceLeft' and
            #'correctAnswerIdx' are undefined here — likely should be
            #responsesAutopilot, idxsStream1 and correctAnswerIdxsStream1[0].
            correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
                                                passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx )
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            core.wait(.06)
            if feedback:
                #NOTE(review): 'FALSE' is not a Python name (should be False)
                #— this line raises NameError when feedback is on.
                play_high_tone_correct_low_incorrect(correct, passThisTrial=FALSE)
            #NOTE(review): T1approxCorrect is never assigned in this branch —
            #presumably should be the approxCorrect returned above.
            print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
            corrEachTrial.append(T1approxCorrect)
            if mainStaircaseGoing:
                staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
                #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
    #ENDING STAIRCASE PHASE
    if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
        print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
        #NOTE(review): slice [0:staircaseTrialN] looks off by one relative to
        #the [0:staircaseTrialN+1] printed on the previous line — verify.
        staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
    print('framesSaved after staircase=',framesSaved) #debugON
    timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
    msg = ('prefaceStaircase phase' if expStop else '')
    msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
    logging.info(msg); print(msg)
    printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
    #print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
    threshNoise = round(staircase.quantile(),3)
    if descendingPsycho:
        threshNoise = 100- threshNoise
    threshNoise = max( 0, threshNoise ) #e.g. if get all trials wrong, posterior peaks at a very negative number
    msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
    logging.info(msg); print(msg)
    myWin.close()
    #Fit and plot data
    fit = None
    try:
        intensityForCurveFitting = staircase.intensities
        if descendingPsycho:
            intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
        fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
    except:
        print("Fit failed.")
    plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
    #save figure to file
    pylab.savefig(fileName+'.pdf')
    print('The plot has been saved, as '+fileName+'.pdf')
    pylab.show() #must call this to actually show plot
#========== MAIN EXPERIMENT (non-staircase) ==========
else: #not staircase
    noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
    print(phasesMsg); logging.info(phasesMsg)
    nDoneMain =0
    while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
        if nDoneMain==0:
            msg='Starting main (non-staircase) part of experiment'
            logging.info(msg); print(msg)
        thisTrial = trials.next() #get a proper (non-staircase) trial
        #NOTE(review): argument order (thisTrial, wordList) differs from the
        #one-argument call in the staircase branch — confirm against the
        #definition of calcAndPredrawStimuli.
        sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(thisTrial,wordList)
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts  = \
            do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
        numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
        #call for each response
        expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
        numCharsInResponse = len(wordList[0])
        dL = [None]*numRespsWanted #dummy list for null values
        expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
        #NOTE(review): range(...).reverse() works only in Python 2; under
        #Python 3 this needs responseOrder = list(range(numRespsWanted)).
        responseOrder = range(numRespsWanted)
        if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
            responseOrder.reverse()
        for i in responseOrder:
            x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
            #NOTE(review): this call passes (numCharsInResponse, x, ...,
            #fixationPoint, ...) whereas the staircase branch calls
            #collectStringResponse with a different argument list — verify
            #which signature stringResponse actually exposes.
            expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
                                      numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
                                                                              requireAcceptance,autopilot,responseDebug=True)
        expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
        if not expStop:
            print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
            print(nDoneMain,'\t', end='', file=dataFile)
            print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
            print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['flipHoriz'],'\t', end='', file=dataFile)
            print(thisTrial['flipVert'],'\t', end='', file=dataFile)
            print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
            print(thisTrial['orthographicalsim'],'\t', end='', file=dataFile)
            i = 0  #NOTE(review): redundant — immediately overwritten by the for loop below
            eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
            for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
                if i==0:
                    sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
                else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
                correct,approxCorrect,responsePosRelative = (
                        handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs ) )
                eachCorrect[i] = correct
                eachApproxCorrect[i] = approxCorrect
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
            numTrialsCorrect += eachCorrect.all() #so count -1 as 0
            numTrialsEachCorrect += eachCorrect #list numRespsWanted long
            numTrialsApproxCorrect += eachApproxCorrect.all()
            numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
            if exportImages:  #catches one frame of response
                myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
                framesSaved +=1; core.wait(.1)
                myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
                expStop=True
            core.wait(.1)
            if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            nDoneMain+=1
            dataFile.flush(); logging.flush()
            print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
            if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                     ( trials.nTotal*pctCompletedBreak/100. ) ==1):  #dont modulus 0 because then will do it for last trial
                nextText.setText('Press "SPACE" to continue!')
                nextText.draw()
                progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
                NextRemindCountText.setText(progressMsg)
                NextRemindCountText.draw()
                myWin.flip() # myWin.flip(clearBuffer=True)
                waiting=True
                while waiting:
                    if autopilot: break
                    elif expStop == True:break
                    for key in event.getKeys():      #check if pressed abort-type key
                        if key in ['space','ESCAPE']:
                            waiting=False
                        if key in ['ESCAPE']:
                            expStop = True
                myWin.clearBuffer()
            core.wait(.2); time.sleep(.2)
    #end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
    #NOTE(review): 'trials.nTotal+1' looks off by one vs the nTotal reported elsewhere — verify.
    msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
    print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
    msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
    print(msg); logging.info(msg)
    msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
    print(msg); logging.info(msg)
    for i in range(numRespsWanted):
        msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
        print(msg); logging.info(msg)
        msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
        print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
"repo_name": "alexholcombe/twoWords",
"path": "specialFieldsStudentCode/twoWordsExperimentInvertedbackMayAlexContinue2.py",
"copies": "2",
"size": "52750",
"license": "mit",
"hash": 3622063637666545700,
"line_mean": 58.6056497175,
"line_max": 209,
"alpha_frac": 0.6849478673,
"autogenerated": false,
"ratio": 3.5471723488669222,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.028897676811722842,
"num_lines": 885
} |
#Alex Holcombe alex.holcombe@sydney.edu.au
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function #use python3 style print
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
#========== Experiment configuration constants ==========
wordEccentricity=3 #degrees of visual angle away from the fixation point
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A NEW MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True.
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
    dataDir='data'
else:
    print('"data" directory does not exist, so saving data in present working directory')
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.;  #100
if demo:
    refreshRate = 60.;  #100
#Staircase parameters (used only when the user chooses the staircase phase).
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
#Stimulus word pool: the 24 most common English three-letter words.
numWordsInStream = 24
wordsUnparsed="the, and, for, you, say, but, his, not, she, can, who, get, her, all, one, out, see, him, now, how, its, our, two, way" #24 most common words
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
wordList = wordsUnparsed.split(",") #split into list
for i in range(len(wordList)):
    wordList[i] = wordList[i].replace(" ", "") #delete spaces
#Later on, this list will be randomly permuted for each trial
#Display appearance and monitor geometry.
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
    widthPix = 600; heightPix = 600
    monitorwidth = 13.0
    fullscr=False; scrn=0
framesSaved=0
if demo:
    scrn=0; fullscr=False
    widthPix = 800; heightPix = 600
    monitorname='testMonitor'
    allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
    title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
    order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
    tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
    #fixed=['Check refresh etc'])#this attribute can't be changed by the user
    )
if not OK.OK:
    print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
    quitFinder = True
if quitFinder:
    #macOS-only: quit the Finder to reduce timing interference.
    import os
    applescript="\'tell application \"Finder\" to quit\'"
    shellCmd = 'osascript -e '+applescript
    os.system(shellCmd)
#letter size 2.5 deg
#Stimulus timing, converted from ms to whole video frames at refreshRate.
SOAms = 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84  because any shorter, I can't always notice the second ring when lag1.   71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80 #23.6  in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round(  (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+=  'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow():
    """Create and return the stimulus window with the lab's standard settings.

    Wrapped in a function because the window is opened several times during
    the session and must be configured identically each time.
    """
    return visual.Window(monitor=mon, size=(widthPix,heightPix), allowGUI=allowGUI,
                         units=units, color=bgColor, colorSpace='rgb', fullscr=fullscr,
                         screen=scrn, waitBlanking=waitBlank) #Holcombe lab monitor
#========== Measure the actual screen refresh rate and compare to assumption ==========
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
    refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
    refreshRateWrong = False
else: #checkRefreshEtc
    runInfo = psychopy.info.RunTimeInfo(
            # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
            #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
            #version="<your experiment version info>",
            win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=True, ## True means report on everything
            userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            )
    #print(runInfo)
    logging.info(runInfo)
    print('Finished runInfo- which assesses the refresh and processes of this computer')
    #check screen refresh is what assuming it is ##############################################
    Hzs=list()
    myWin.flip(); myWin.flip();myWin.flip();myWin.flip();  #warm up the pipeline before measuring
    myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
    print('About to measure frame flips')
    for i in range(50):
        myWin.flip()
        Hzs.append( myWin.fps() )  #varies wildly on successive runs!
    myWin.setRecordFrameIntervals(False)
    # end testing of screen refresh########################################################
    Hzs = np.array( Hzs );     Hz= np.median(Hzs)  #median is robust to the outlier measurements
    msPerFrame= 1000./Hz
    refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
    refreshRateTolerancePct = 3
    pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
    refreshRateWrong =  pctOff > (refreshRateTolerancePct/100.)
    if refreshRateWrong:
        refreshMsg1 += ' BUT'
        refreshMsg1 += ' program assumes ' + str(refreshRate)
        refreshMsg2 =  'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
    else:
        refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
#========== Parameter dialog: subject name, noise level / staircase settings ==========
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 1 #default value
dlgLabelsOrdered = list()  #tracks field order so values can be looked up by name afterwards
if doStaircase:
    myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
    myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
    myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
    dlgLabelsOrdered.append('subject')
if doStaircase:
    easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
    myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
    dlgLabelsOrdered.append('easyTrials')
    myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
    dlgLabelsOrdered.append('staircaseTrials')
    pctCompletedBreak = 101  #>100 so no break screen during staircase
else:
    myDlg.addField('\tPercent noise dots=',  defaultNoiseLevel, tip=str(defaultNoiseLevel))
    dlgLabelsOrdered.append('defaultNoiseLevel')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
    myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
    msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
    myDlg.addText(msgWrongResolution, color='Red')
    logging.error(msgWrongResolution)
    print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information entered in dialogue box
   thisInfo = myDlg.data #this will be a list of data returned from each field added in order
   if not autopilot:
       name=thisInfo[dlgLabelsOrdered.index('subject')]
       if len(name) > 0: #if entered something
           subject = name #change subject default name to what user entered
   if doStaircase:
       if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
           staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
           print('staircaseTrials entered by user=',staircaseTrials)
           #NOTE(review): psychopy's logging.info takes a single message
           #argument; the extra positional arg here is likely misinterpreted.
           logging.info('staircaseTrials entered by user=',staircaseTrials)
       if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
           prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
           print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
           logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
   else: #not doing staircase
       trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
       print('trialsPerCondition=',trialsPerCondition)
       logging.info('trialsPerCondition =',trialsPerCondition)
       defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
   print('User cancelled from dialog box.')
   logging.flush()
   core.quit()
#========== Output data file, code snapshot, and logging setup ==========
if not demo:
    allowGUI = False
myWin = openMyStimWindow()  #reopen stim window. Had to close test window to allow for dialogue boxes
#set up output data file, log file,  copy of program code, and logging
infix = ''  #part of the filenames
if doStaircase:
    infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
    dataFile = open(fileName+'.txt', 'w')
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
    os.system(saveCodeCmd)  #save a copy of the code as it was when that subject was run
    logFname = fileName+'.log'
    ppLogF = logging.LogFile(logFname,
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
  dataFile = sys.stdout; logF = sys.stdout
  logging.console.setLevel(logging.ERROR)  #only show this level  messages and higher
#NOTE(review): this second setLevel overrides the one above in demo/export
#mode, and its trailing comment describes DEBUG while the code sets ERROR.
logging.console.setLevel(logging.ERROR) #DEBUG means set  console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
    runInfo = psychopy.info.RunTimeInfo(
        # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
        #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
        #version="<your experiment version info>",
        win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=False, ## True means report on everything
        userProcsDetailed=True,  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        #randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
            ## None -> default
            ## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
            ##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
            ##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
        )
    logging.info(runInfo)
logging.flush()
#Module-level buckets of predrawn TextStims, (re)populated by calcAndPredrawStimuli.
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(wordList,thisTrial): #Called before each trial
    """Predraw the TextStim objects for both RSVP streams and return the
    stimulus orders for this trial.

    wordList: list of candidate words; must have at least numWordsInStream entries.
    thisTrial: trial dict (currently unused here; kept for interface
        compatibility with callers and future per-trial customization).

    Returns (idxsStream1, idxsStream2): two independently shuffled index
    arrays into wordList, one per stream.

    Side effect: repopulates the module-level lists textStimuliStream1 and
    textStimuliStream2 with one TextStim per word (left and right stream
    respectively).
    """
    if len(wordList) < numWordsInStream:
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    #Fix: clear the stimuli from the previous trial. Previously each call
    #appended numWordsInStream new TextStims without clearing, so these
    #module-level lists grew without bound over the session. The drawn
    #stimuli are identical each trial, so clearing preserves behavior.
    del textStimuliStream1[:]
    del textStimuliStream2[:]
    idxsIntoWordList = np.arange( len(wordList) ) #create a list of indexes of the entire word list: 0,1,2,3,4,5,...23
    print('wordList=',wordList)
    for i in range(0,numWordsInStream): #draw the words that will be used on this trial, the first 26 of the shuffled list
        word = wordList[ i ] # #[ idxsIntoWordList[i] ]
        #flipHoriz, flipVert textStim http://www.psychopy.org/api/visual/textstim.html
        #Create one bucket of words for the left stream
        textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        #Create a bucket of words for the right stream
        textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-wordEccentricity,0]) #left
        textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
        textStimulusStream2.setPos([wordEccentricity,0]) #right
        textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli that comprise stream 2
    #If you are Joel or someone else who needs to mess with the stream conditional on the cue position, this is probably where we are going to do it
    #pseudoHomophonePos = thisTrial['cuePos'] -1
    #Use these buckets by pulling out the drawn words in the order you want them. For now, just create the order you want.
    #Stream 1 is the shuffled arange itself (shuffled in place); stream 2 is
    #an independent re-shuffle of a copy, so the two orders are uncorrelated.
    idxsStream1 = idxsIntoWordList #first RSVP stream
    np.random.shuffle(idxsIntoWordList) #0,1,2,3,4,5,... -> randomly permuted 3,2,5,...
    idxsStream2 = copy.deepcopy(idxsIntoWordList) #make a copy for the right stream, and permute them on the next list
    np.random.shuffle(idxsStream2)
    return idxsStream1, idxsStream2
#create click sound for keyboard feedback; fall back to a synthesized click if the wav is missing
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except Exception: #narrowed from bare except so KeyboardInterrupt/SystemExit still propagate
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
#Fixation stimuli: a noise-texture patch whose contrast can be flipped to make skipped frames visible
if showRefreshMisses:
    fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
#BUGFIX: use floor division so the texture dimensions are ints; fixSizePix/4 is a
#float under Python 3 and np.random.rand rejects non-integer dimensions.
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix//4,fixSizePix//4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
#Construct the fixation point.
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=4,units='pix',autoLog=autoLogging)
#Construct the holders for the experiment text that will appear on screen
#(prompt shown below fixation, acceptance reminder, the echoed response, and break-screen messages)
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
requireAcceptance = False #if True, participant must press ENTER to accept each typed response
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse() #feedback sounds for typing
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS, This implements the full factorial design!
#5 cue positions x 2 response orders x 2 flip states x 3 pseudohomophone placements
cuePositions = np.array([10,11,12,13,14])
for cuePos in cuePositions:
    for rightResponseFirst in [False,True]:
        for bothWordsFlipped in [False,True]:
            for pseudoHomophone in ['None','Left','Right']:
                stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst,
                    'leftStreamFlip':bothWordsFlipped, 'rightStreamFlip':bothWordsFlipped, 'pseudoHomophone':pseudoHomophone} )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
#NOTE(review): trialsPerCondition, trialDurFrames, refreshRate are defined elsewhere in the file — confirm before running standalone
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
    ' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
    """Map an integer 0-25 onto 'A'-'Z'.

    Returns '@' for out-of-range or non-integer input instead of raising,
    so callers can treat any bad value uniformly.
    """
    try:
        if 0 <= number <= 25:
            return chr( ord('A')+number )
    except TypeError: #narrowed from bare except: non-numeric input (e.g. a float/str that chr rejects)
        pass
    return '@' #not really a letter
def letterToNumber(letter): #A = 0, Z = 25
    """Map a single uppercase letter 'A'-'Z' onto 0-25.

    Returns -999 for anything else: lowercase, non-letters, multi-character
    strings (ord raises TypeError on those), or non-string input.
    """
    try:
        if 'A' <= letter <= 'Z':
            return ord(letter)-ord('A')
    except TypeError: #narrowed from bare except: non-string input, or multi-char string passed to ord
        pass
    return -999
def wordToIdx(word,wordList):
    """Return the index of the first case-insensitive match of word in wordList.

    word is expected to be uppercase already (each candidate is uppercased
    before comparison). Returns None when the word is absent or when an
    unexpected error occurs (e.g. non-string entries in wordList).
    """
    try:
        #http://stackoverflow.com/questions/7102050/how-can-i-get-a-python-generator-to-return-none-rather-than-stopiteration
        firstMatchIdx = next((i for i, val in enumerate(wordList) if val.upper()==word), None) #return i (index) unless no matches, in which case return None
        return firstMatchIdx
    except Exception: #narrowed from bare except so KeyboardInterrupt/SystemExit still propagate
        print('Unexpected error in wordToIdx with word=',word)
        return None
#print header for data file (tab-separated; one column per field written by handleAndScoreResponse)
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
    numRespsWanted = 2 #report one word from each of the two streams
    dataFile.write('rightResponseFirst\t')
    for i in range(numRespsWanted):
        dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
        dataFile.write('answer'+str(i)+'\t')
        dataFile.write('response'+str(i)+'\t')
        dataFile.write('correct'+str(i)+'\t')
        dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile) #last column on every data line
#end of header
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
        noise,proportnNoise,allFieldCoords,numNoiseDots ):
    #defining a function to draw each frame of stim.
    #seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
    #n is the frame number within the trial; returns True as a success flag.
    #Relies on globals: bgColor, cueColor, letterColor, np.
    SOAframes = letterDurFrames+ISIframes
    cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
    stimN = int( np.floor(n/SOAframes) ) #index of the current item in the stream
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
    #print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
    thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
    if seq2 is not None:
        thisStim2Idx = seq2[stimN]
    #NOTE(review): if seq2 is None, thisStim2Idx is never assigned yet is used below — callers always pass a sequence; confirm
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    cue.setLineColor( bgColor )
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #check whether it's time for any cue
        if n>=cueFrame and n<cueFrame+cueDurFrames:
            cue.setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
        textStimuliStream2[thisStim2Idx].setColor( letterColor )
    else:
        textStimuliStream1[thisStimIdx].setColor( bgColor )
        textStimuliStream2[thisStim2Idx].setColor( bgColor )
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    textStimuliStream2[thisStim2Idx].draw()
    cue.draw()
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0:
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0: #noise drawn last, so it sits on top of the letters
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
#The cue: a circle drawn around the target word; color toggled each frame by oneFrameOfStim
cue = visual.Circle(myWin,
                 radius=cueRadius,#Martini used circles with diameter of 12 deg
                 lineColorSpace = 'rgb',
                 lineColor=bgColor,
                 lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
                 units = 'deg',
                 fillColorSpace = 'rgb',
                 fillColor=None, #beware, with convex shapes fill colors don't work
                 pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
                 interpolate=True,
                 autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
#NOTE(review): pixelperdegree is defined elsewhere in the file — confirm it is set before this line runs
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Check frame times for skipped refreshes and log any problems.

    ts: list of clock times recorded after each frame flip during the trial.
    trialN: trial number, included in the log message.
    Returns the number of inter-frame intervals that exceeded the tolerance.
    Relies on globals: refreshRate, demo, logging, np.
    """
    interframeIntervs = np.diff(ts)*1000 #convert to ms
    #print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong >0 and (not demo):
        #Build and log a description of the skipped frames. (The original also had an
        #"if demo" branch here, which was unreachable because this block requires not demo.)
        longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
        longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
            str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
        logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
        #Also log the durations of the frames flanking each long frame, as INFO so it
        #won't fill up the console when console is set to WARNING or higher.
        flankingAlso = list()
        for idx in idxsInterframeLong: #also record timing of one before and one after each long frame
            flankingAlso.append(idx-1 if idx-1 >= 0 else np.nan)
            flankingAlso.append(idx)
            flankingAlso.append(idx+1 if idx+1 < len(interframeIntervs) else np.nan)
        flankingAlso = np.array(flankingAlso)
        #BUGFIX: np.negative is arithmetic negation, not logical NOT; on modern NumPy it
        #raises for boolean arrays. Use logical_not to drop the NaN placeholders.
        flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
        flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts (np.integer alias removed in NumPy 2)
        logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) )
    return numCasesInterframeLong
#end timing check
trialClock = core.Clock()
#Running tallies for the end-of-session summary; numRespsWanted is set in the T1 header section above
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted ) #per-stream exact-correct counts
numTrialsEachApproxCorrect= np.zeros( numRespsWanted ) #per-stream within-3-positions counts
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
    """Run one RSVP trial: fixation interval, then the dual-stream stimulus loop.

    thisTrial: condition dict; must have 'cuePos' (and the flip keys read by oneFrameOfStim).
    seq1, seq2: index sequences into the pre-drawn text stimuli for left/right streams.
    proportnNoise: proportion of noise dots superposed (0 for none).
    Returns (cuesPos, correctAnswerIdxsStream1, correctAnswerIdxsStream2, ts) where
    ts is the per-frame clock times for timingCheckAndLog.
    """
    #relies on global variables:
    #   textStimuli, logging, bgColor
    #   thisTrial should have 'cuePos'
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesPos = [] #will contain the positions in the stream of all the cues (targets)
    cuesPos.append(thisTrial['cuePos'])
    cuesPos = np.array(cuesPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    cue.setLineColor(bgColor)
    preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20): #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        #    fixation.draw()
        #else: fixationBlank.draw()
        fixationPoint.draw()
        myWin.flip() #end fixation interval
    #myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
            noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0; ts.append(t); #frame time, relative to stimulus onset
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    if task=='T1':
        respPromptStim.setText('What was circled?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
    correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
    #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
    return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx):
    """Score one typed response against the cued word and append the result to dataFile.

    response: sequence of characters typed by the participant.
    stimSequence: the index sequence that was presented for this stream.
    cuePos: position of the cue in the stream; correctAnswerIdx: index into wordList.
    Writes 5 tab-terminated fields to dataFile (cuePos, answer, response, correct,
    responsePosRelative) and returns (correct, approxCorrect, responsePosRelative).
    Relies on globals: autopilot, wordList, dataFile, logging, np.
    """
    #Handle response, calculate whether correct, ########################################
    #responses are actual characters
    #correctAnswer is index into stimSequence
    #autopilot is global variable
    if autopilot or passThisTrial:
        response = responseAutopilot
    #print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\nstimSequence=',stimSequence, '\nwords=',wordList)
    correct = 0
    approxCorrect = 0
    posOfResponse = -999
    responsePosRelative = -999
    idx = correctAnswerIdx
    correctAnswer = wordList[idx].upper()
    responseString= ''.join(['%s' % char for char in response]) #collapse list of chars into one string
    responseString= responseString.upper()
    #print('correctAnswer=',correctAnswer ,' responseString=',responseString)
    if correctAnswer == responseString:
        correct = 1
    #print('correct=',correct)
    responseWordIdx = wordToIdx(responseString,wordList)
    if responseWordIdx is None: #response is not in the wordList
        posOfResponse = -999
        logging.warn('Response was not present in the stimulus stream')
    else:
        posOfResponse= np.where( responseWordIdx==stimSequence )
        posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence
        if len(posOfResponse) > 1:
            logging.error('Expected response to have occurred in only one position in stream')
        posOfResponse = posOfResponse[0] #first element of list (should be only one element long
        responsePosRelative = posOfResponse - cuePos
        approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus/minus
    #print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\nposOfResponse = ',posOfResponse) #debugON
    #print response stuff to dataFile
    #header was answerPos0, answer0, response0, correct0, responsePosRelative0
    print(cuePos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
    return correct,approxCorrect,responsePosRelative
#end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Auditory feedback: high tone when correct, alternating tones when the trial
    was passed, low tone when incorrect."""
    toneHigh = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
    toneLow = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
    toneHigh.setVolume(0.9)
    toneLow.setVolume(1.0)
    if correct:
        toneHigh.play()
        return
    if passThisTrial:
        #participant passed on the trial: play a distinctive high/low alternation
        tonePass = sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            tonePass.play()
            toneLow.play()
        return
    toneLow.play() #incorrect
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
    #STAIRCASE PHASE: estimate the noise threshold before (or instead of) the main block
    #create the staircase handler
    useQuest = True
    if useQuest:
        staircase = data.QuestHandler(startVal = 95,
            startValSd = 80,
            stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
            nTrials = staircaseTrials,
            #extraInfo = thisInfo,
            pThreshold = threshCriterion, #0.25,
            gamma = 1./26, #chance performance for 26 alternatives
            delta=0.02, #lapse rate, I suppose for Weibull function fit
            method = 'quantile', #uses the median of the posterior as the final answer
            stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
            minVal=1, maxVal = 100
            )
        print('created QUEST staircase')
    else:
        stepSizesLinear = [.2,.2,.1,.1,.05,.05] #NOTE(review): unused; stepSizesLog is passed below
        stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
        staircase = data.StairHandler(startVal = 0.1,
            stepType = 'log', #if log, what do I want to multiply it by
            stepSizes = stepSizesLog, #step size to use after each reversal
            minVal=0, maxVal=1,
            nUp=1, nDown=3, #will home in on the 80% threshold
            nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
            nTrials=1)
        print('created conventional staircase')
    if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
        prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
        prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
    phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
    print(phasesMsg); logging.info(phasesMsg)
    #staircaseStarterNoise PHASE OF EXPERIMENT
    corrEachTrial = list() #only needed for easyStaircaseStarterNoise
    staircaseTrialN = -1; mainStaircaseGoing = False
    while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
        if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
            staircaseTrialN += 1
            noisePercent = prefaceStaircaseNoise[staircaseTrialN]
        else:
            if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
                mainStaircaseGoing = True
                print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
                staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
            try: #advance the staircase
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
                noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
                staircaseTrialN += 1
            except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
                print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
                break #break out of the trials loop
        #print('staircaseTrialN=',staircaseTrialN)
        #NOTE(review): second arg should apparently be a trial dict (cf. main loop); staircaseTrials is a count — confirm
        idxsStream1, idxsStream2 = calcAndPredrawStimuli(wordList,staircaseTrials)
        #NOTE(review): do_RSVP_stim expects a trial dict as first arg, but cuePos here is a
        #leftover loop variable from the condition setup — confirm this staircase path was ever exercised
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(cuePos, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
        numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        #NOTE(review): this call signature differs from the main loop's collectStringResponse call — confirm
        expStop,passThisTrial,responses,responsesAutopilot = \
            stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
                requireAcceptance,autopilot,responseDebug=True)
        if not expStop:
            if mainStaircaseGoing:
                print('staircase\t', end='', file=dataFile)
            else:
                print('staircase_preface\t', end='', file=dataFile)
            #header start 'trialnum\tsubject\ttask\t'
            print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
            print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            #NOTE(review): responseAutopilot, sequenceLeft and correctAnswerIdx are not defined in
            #this scope (the main loop uses responsesAutopilot[i], sequenceStream, correctAnswerIdxs) — confirm
            correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
                passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx )
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            core.wait(.06)
            if feedback:
                play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            #NOTE(review): T1approxCorrect is never assigned in this chunk — presumably meant approxCorrect; confirm
            print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
            corrEachTrial.append(T1approxCorrect)
            if mainStaircaseGoing:
                staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
                #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
    #ENDING STAIRCASE PHASE
    if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
        print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
        staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
    print('framesSaved after staircase=',framesSaved) #debugON
    timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
    msg = ('prefaceStaircase phase' if expStop else '')
    msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
    logging.info(msg); print(msg)
    printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
    #print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
    threshNoise = round(staircase.quantile(),3)
    if descendingPsycho:
        threshNoise = 100- threshNoise
    threshNoise = max( 0, threshNoise ) #e.g. ff get all trials wrong, posterior peaks at a very negative number
    msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
    logging.info(msg); print(msg)
    myWin.close()
    #Fit and plot data
    fit = None
    try:
        intensityForCurveFitting = staircase.intensities
        if descendingPsycho:
            intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
        fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
    except:
        print("Fit failed.")
    plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
    #save figure to file
    pylab.savefig(fileName+'.pdf')
    print('The plot has been saved, as '+fileName+'.pdf')
    pylab.show() #must call this to actually show plot
else: #not staircase
    #MAIN EXPERIMENT: fixed noise level, run through the full factorial trial list
    #NOTE(review): wordList, defaultNoiseLevel and pctCompletedBreak are defined elsewhere in the file — confirm
    noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
    print(phasesMsg); logging.info(phasesMsg)
    nDoneMain =0
    while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
        if nDoneMain==0:
            msg='Starting main (non-staircase) part of experiment'
            logging.info(msg); print(msg)
        thisTrial = trials.next() #get a proper (non-staircase) trial; .next() is the Python-2 TrialHandler API
        sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordList,thisTrial)
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
        numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
        #call for each response
        expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
        numCharsInResponse = len(wordList[0]) #NOTE(review): assumes all words are the same length as the first — confirm
        dL = [None]*numRespsWanted #dummy list for null values
        expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
        responseOrder = range(numRespsWanted) #Python-2 list; .reverse() below would fail on a Py3 range
        if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
            responseOrder.reverse()
        for i in responseOrder: #collect the two stream responses, left (i=0) and right (i=1)
            x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
            expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
                numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
                requireAcceptance,autopilot,responseDebug=True)
        expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
        if not expStop:
            print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
            print(nDoneMain,'\t', end='', file=dataFile)
            print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
            print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
            i = 0
            eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
            for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
                if i==0:
                    sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
                else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
                correct,approxCorrect,responsePosRelative = (
                    handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs ) )
                eachCorrect[i] = correct
                eachApproxCorrect[i] = approxCorrect
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
            numTrialsCorrect += eachCorrect.all() #so count -1 as 0
            numTrialsApproxCorrect += eachApproxCorrect.all()
            numTrialsEachCorrect += eachCorrect #list numRespsWanted long
            numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
            if exportImages: #catches one frame of response
                myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
                framesSaved +=1; core.wait(.1)
                myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
                expStop=True
            core.wait(.1)
            if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            nDoneMain+=1
            dataFile.flush(); logging.flush()
            print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
            #Offer a rest break at regular intervals through the session
            if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                ( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
                nextText.setText('Press "SPACE" to continue!')
                nextText.draw()
                progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
                NextRemindCountText.setText(progressMsg)
                NextRemindCountText.draw()
                myWin.flip() # myWin.flip(clearBuffer=True)
                waiting=True
                while waiting:
                    if autopilot: break
                    elif expStop == True:break
                    for key in event.getKeys(): #check if pressed abort-type key
                        if key in ['space','ESCAPE']:
                            waiting=False
                        if key in ['ESCAPE']:
                            expStop = True
                myWin.clearBuffer()
        core.wait(.2); time.sleep(.2)
#end main trials loop
#Session wrap-up: report timing, abort status and summary accuracy, then tidy up
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
    msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
    print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
    msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
    print(msg); logging.info(msg)
    msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
    print(msg); logging.info(msg)
    for i in range(numRespsWanted): #per-stream accuracy summary
        msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
        print(msg); logging.info(msg)
        msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
        print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
if quitFinder: #macOS only: relaunch Finder if it was quit at startup
    applescript="\'tell application \"Finder\" to launch\'" #turn Finder back on
    shellCmd = 'osascript -e '+applescript
    os.system(shellCmd)
| {
"repo_name": "alexholcombe/twoWords",
"path": "specialFieldsStudentCode/twoWordsForJoel.py",
"copies": "2",
"size": "49800",
"license": "mit",
"hash": 399086273864182200,
"line_mean": 58.0747330961,
"line_max": 209,
"alpha_frac": 0.6828313253,
"autogenerated": false,
"ratio": 3.5591766723842198,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.029874520919085417,
"num_lines": 843
} |
#Alex Holcombe alex.holcombe@sydney.edu.au
#Modified by Kim Ransley from the twoWords.py program at the github repository: https://github.com/alexholcombe/twoWords
#1. Inverted
#2. One word
#3. duration
#4. trail, trial
#5. numCharsInResponse to fit words of diff length?
#6. total number of trials?
#7. correct responses
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import scipy
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
#Top-level configuration flags for the session
wordEccentricity=0
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
    dataDir='data'
else:
    print('"data" directory does not exist, so saving data in present working directory')
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
if demo:
    refreshRate = 60.; #100
    #NOTE(review): refreshRate is only assigned in demo mode here — confirm it is set elsewhere for real runs
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 1
# reads word in from external source
wordList1 = open("wordList1.txt")
wordList2=open("wordList2.txt")
wordList1 = [x.rstrip() for x in wordList1.readlines()]
wordList2 = [x.rstrip() for x in wordList2.readlines()]
for i in range(len(wordList1)):
wordList1[i] = wordList1[i].replace(" ", "") #delete spaces
for i in range(len(wordList2)):
wordList2[i] = wordList2[i].replace(" ", "") #delete spaces
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [-.7,-.7,-.7] #originally [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=False #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=0
framesSaved=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':False, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate': 60 }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='AB experiment OR staircase to find thresh noise level for T1 performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg
SOAms = 500 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133 #KR: was 233
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 90 #Was 17. 23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow():
    """Create and return the experiment's stimulus window.

    Wrapped in a function because the window is opened and re-opened several
    times during the session and every opening must use identical settings.
    Reads module-level configuration: mon, widthPix, heightPix, allowGUI,
    units, bgColor, fullscr, scrn, waitBlank.
    """
    windowSettings = dict(
        monitor=mon, size=(widthPix, heightPix), allowGUI=allowGUI,
        units=units, color=bgColor, colorSpace='rgb', fullscr=fullscr,
        screen=scrn, waitBlanking=waitBlank)
    return visual.Window(**windowSettings)  #Holcombe lab monitor
myWin = openMyStimWindow()
refreshMsg2 = ''
#optionally measure the actual monitor refresh rate and compare to the assumed one
if not checkRefreshEtc:
    refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
    refreshRateWrong = False
else: #checkRefreshEtc
    runInfo = psychopy.info.RunTimeInfo(
            # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
            #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
            #version="<your experiment version info>",
            win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=True, ## True means report on everything
            userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            )
    #print(runInfo)
    logging.info(runInfo)
    print('Finished runInfo- which assesses the refresh and processes of this computer')
    #check screen refresh is what assuming it is ##############################################
    Hzs=list()
    myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
    myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
    print('About to measure frame flips')
    for i in range(50):
        myWin.flip()
        Hzs.append( myWin.fps() ) #varies wildly on successive runs!
    myWin.setRecordFrameIntervals(False)
    # end testing of screen refresh########################################################
    Hzs = np.array( Hzs ); Hz= np.median(Hzs) #median is robust to the wild fps outliers above
    msPerFrame= 1000./Hz
    refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
    refreshRateTolerancePct = 3
    pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
    refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
    if refreshRateWrong:
        refreshMsg1 += ' BUT'
        refreshMsg1 += ' program assumes ' + str(refreshRate)
        refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
    else:
        refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
    myWinRes = myWin.size
    myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
#---------------- per-session dialog: subject name, trial counts, noise level ----------------
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 20 #default value
dlgLabelsOrdered = list() #records the order fields were added, so myDlg.data can be indexed by name
if doStaircase:
    myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
    myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
    myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
    dlgLabelsOrdered.append('subject')
if doStaircase:
    easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
    myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
    dlgLabelsOrdered.append('easyTrials')
    myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
    dlgLabelsOrdered.append('staircaseTrials')
    pctCompletedBreak = 101
else:
    myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
    dlgLabelsOrdered.append('defaultNoiseLevel')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
    myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
    msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
    myDlg.addText(msgWrongResolution, color='Red')
    logging.error(msgWrongResolution)
    print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
   thisInfo = myDlg.data #this will be a list of data returned from each field added in order
   if not autopilot:
       name=thisInfo[dlgLabelsOrdered.index('subject')]
       if len(name) > 0: #if entered something
           subject = name #change subject default name to what user entered
   if doStaircase:
       if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
           staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
           print('staircaseTrials entered by user=',staircaseTrials)
           #NOTE(review): psychopy's logging.info takes a single message string; the second
           #positional argument here is likely misinterpreted (not printed) — confirm.
           logging.info('staircaseTrials entered by user=',staircaseTrials)
       if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
           prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
           print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
           logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
   else: #not doing staircase
       trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
       print('trialsPerCondition=',trialsPerCondition)
       logging.info('trialsPerCondition =',trialsPerCondition)
       defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
   print('User cancelled from dialog box.')
   logging.flush()
   core.quit()
if not demo:
    allowGUI = False
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
    infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
    dataFile = open(fileName+'.txt', 'w')
    #NOTE(review): 'cp' is POSIX-only; this self-archiving step would fail silently on Windows
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
    os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
    logFname = fileName+'.log'
    ppLogF = logging.LogFile(logFname,
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
  dataFile = sys.stdout; logF = sys.stdout
  logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
    runInfo = psychopy.info.RunTimeInfo(
        # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
        #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
        #version="<your experiment version info>",
        win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=False, ## True means report on everything
        userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        #randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
            ## None -> default
            ## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
            ##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
            ##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
        )
    logging.info(runInfo)
logging.flush()
#holds the pre-drawn TextStim objects for the (single) RSVP stream; filled by calcAndPredrawStimuli
textStimuliStream1 = list()
def calcAndPredrawStimuli(wordList):
    """Pre-build a TextStim for every word in wordList (stored in the module-level
    textStimuliStream1 list) and return a shuffled numpy array of indices into
    wordList giving the RSVP stream order.

    NOTE(review): the slice range(0,len(wordList)-1) omits the final index, so the
    last word gets a pre-drawn stimulus but can never appear in the returned
    stream order — confirm whether this off-by-one is intentional.
    """
    del textStimuliStream1[:]  #clear stimuli left over from the previous trial
    if len(wordList) < numWordsInStream:
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    idxsIntoWordList = np.arange( len(wordList) ) #create a list of indexes of the entire word list
    for i in range(0,len(wordList)): #draw the words that will be used on this trial, the first numWordsInStream of the shuffled list
        word = wordList[ i ]#
        textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-wordEccentricity,0]) #left
        textStimuliStream1.append(textStimulusStream1)
    idxsStream1 = idxsIntoWordList[range(0,len(wordList)-1)] #first RSVP stream
    np.random.shuffle(idxsStream1)
    return idxsStream1
#create click sound for keyboard
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except Exception: #in case file missing, create inferior click manually (narrowed from bare except so Ctrl-C still works)
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
    fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
#fixSizePix//4: floor division so np.random.rand receives ints — plain / yields a
#float under Python 3 true division, which np.random.rand rejects. Same value as
#the original Python 2 integer division.
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix//4,fixSizePix//4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=10,units='pix',autoLog=autoLogging)
#response-phase text stimuli (prompt, accept instruction, echoed response, progress reminders)
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS: full factorial of cue position x response order x flip x word list
cuePositions = np.array([0]) # [4,10,16,22] used in Martini E2, group 2
for cuePos in cuePositions:
    for rightResponseFirst in [False,True]:
        for bothWordsFlipped in [False,True]:
            for wordListThis in [1,2]:
                stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst, 'wordList':wordListThis,
                    'leftStreamFlip':bothWordsFlipped, 'rightStreamFlip':bothWordsFlipped} )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
               ' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
    """Map an integer 0-25 to the corresponding capital letter 'A'-'Z'.

    Returns '@' for anything outside that range, or for values that cannot be
    compared to / added to an int (the original bare except is narrowed to
    TypeError, and the range check now sits inside the try so that non-numeric
    input returns '@' instead of raising under Python 3).
    """
    try:
        if 0 <= number <= 25:
            return chr(ord('A') + number)  #chr raises TypeError for e.g. a float offset, caught below
    except TypeError:
        pass  #not a usable number — fall through to the sentinel
    return '@'
def letterToNumber(letter): #A = 0, Z = 25
    """Map a single capital letter 'A'-'Z' to 0-25; return -999 otherwise.

    Multi-character strings that happen to sort between 'A' and 'Z' (e.g. 'AB')
    reach ord(), which raises TypeError and yields -999 — same behavior as the
    original, but the bare except is narrowed to TypeError so real bugs and
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        if 'A' <= letter <= 'Z':
            return ord(letter) - ord('A')
        return -999  #lowercase, digits, empty string, etc.
    except TypeError:  #not comparable to str, or not a single character for ord()
        return -999
def wordToIdx(word, wordList):
    """Return the index of the first entry in wordList whose uppercased form
    equals word, or None when there is no match (or when anything unexpected
    goes wrong, e.g. a non-string entry in the list)."""
    try:
        for position, candidate in enumerate(wordList):
            if candidate.upper() == word:
                return position
        return None  #no entry matched
    except:
        print('Unexpected error in wordToIdx with word=',word)
        return None
#print header for data file (tab-separated columns, one row per trial)
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
    numRespsWanted = 1  #one response per trial; also used later to size the per-response counters
    dataFile.write('rightResponseFirst\t')
    for i in range(numRespsWanted):
        dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
        dataFile.write('answer'+str(i)+'\t')
        dataFile.write('response'+str(i)+'\t')
        dataFile.write('correct'+str(i)+'\t')
        # dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,cue,seq1,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,
                   noise,proportnNoise,allFieldCoords,numNoiseDots ):
    """Draw frame n of the RSVP stream (word, cue ring, and optional noise dots).

    n: frame number within the trial. seq1: order of stimulus indices for the
    stream. Returns True always (success flag consumed by the caller).
    Timing-critical: every stimulus is drawn on every frame (in bgColor when it
    should be invisible) so any timing glitch affects all frames equally.
    """
    #defining a function to draw each frame of stim.
    #seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
    SOAframes = letterDurFrames+ISIframes
    cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
    stimN = int( np.floor(n/SOAframes) )
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
    #print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
    thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    cue.setLineColor( bgColor )
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #check whether it's time for any cue
        if n>=cueFrame and n<cueFrame+cueDurFrames:
            cue.setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
    else:
        textStimuliStream1[thisStimIdx].setColor( bgColor )
    textStimuliStream1[thisStimIdx].flipVert = thisTrial['leftStreamFlip']
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    cue.draw()
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0:
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0:
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
#the cue is a circle drawn around fixation; invisible (bgColor) except on cue frames
cue = visual.Circle(myWin,
                 radius=cueRadius,#Martini used circles with diameter of 12 deg
                 lineColorSpace = 'rgb',
                 lineColor=bgColor,
                 lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
                 units = 'deg',
                 fillColorSpace = 'rgb',
                 fillColor=None, #beware, with convex shapes fill colors don't work
                 pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
                 interpolate=True,
                 autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Scan one trial's frame-time record for skipped screen refreshes and log them.

    ts: list of clock times recorded after each frame flip.
    trialN: trial number, used only to label the log message.
    Returns the number of inter-frame intervals that exceeded tolerance.
    Relies on module-level refreshRate and demo.

    Fixes vs the original: np.negative(np.isnan(...)) was arithmetic negation,
    which is invalid on boolean arrays in modern NumPy — replaced with
    np.logical_not; np.NaN (removed in NumPy 2.0) -> np.nan; abstract
    astype(np.integer) -> astype(int). Also removed two branches that were
    provably unreachable inside the (not demo) guard.
    """
    interframeIntervs = np.diff(ts)*1000  #convert to ms
    #print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong >0 and (not demo):
        longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
        longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
                    str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
        logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
        #also log the timing of the frames flanking each long frame. Logged as INFO so
        #it won't fill the console when console level is WARNING or higher.
        flankingAlso=list()
        for idx in idxsInterframeLong:
            if idx-1>=0:
                flankingAlso.append(idx-1)
            else: flankingAlso.append(np.nan)
            flankingAlso.append(idx)
            if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
            else: flankingAlso.append(np.nan)
        flankingAlso = np.array(flankingAlso)
        flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
        flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts
        logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) )
    return numCasesInterframeLong
#end timing check
#per-session accumulators for the summary printed at the end of the experiment
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted )  #numRespsWanted set above in the task=='T1' branch
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
def do_RSVP_stim(thisTrial, seq1, proportnNoise,trialN):
    """Run the stimulus phase of one trial: fixation interval, pre-mask ('###'),
    the RSVP word stream, and a post-stream dynamic noise mask, then set the
    response prompt.

    thisTrial: dict with at least 'cuePos' (and stream-flip flags used by
    oneFrameOfStim). seq1: stimulus-index order for the stream.
    proportnNoise: 0-1 proportion of noise dots overlaid on the stream.
    Returns (cuesPos, correctAnswerIdxsStream1, ts) where ts is the list of
    per-frame clock times for the timing check.
    NOTE(review): callers later in the file appear to unpack only two values —
    verify call sites against this three-value return.
    """
    #relies on global variables:
    #   textStimuli, logging, bgColor
    #  thisTrial should have 'cuePos'
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesPos = [] #will contain the positions in the stream of all the cues (targets)
    cuesPos.append(thisTrial['cuePos'])
    cuesPos = np.array(cuesPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #gtenerating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    cue.setLineColor(bgColor)
    preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int(   (np.random.rand(1)/2.+fixatnPeriodMin)   *refreshRate)  #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20):  #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        #      fixation.draw()
        #else: fixationBlank.draw()
        fixationPoint.draw()
        myWin.flip()  #end fixation interval
    #myWin.setRecordFrameIntervals(True);  #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    # adding a noise mask (forward mask of '###' before the stream)
    noiseMaskDur = 0.20
    noiseMaskFrames = int(noiseMaskDur *refreshRate)
    for i in range(noiseMaskFrames):
        #unicodeStuff = visual.TextStim(myWin,
        #            text = u"unicode (eg \u03A8 \u040A \u03A3)",#you can find the unicode character value from MS Word 'insert symbol'
        #           color='black', font=serif,pos=(0,3),
        #           height = 1
        myPatch1 = visual.TextStim(myWin, text = u"###",pos=(wordEccentricity, 0),height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging )
        myPatch1.draw()
        myWin.flip()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cue,seq1,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,
                                 noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0;  ts.append(t);
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    # adding a noise mask (backward mask: drifting random-noise grating after the stream)
    noiseMaskDur = 0.3
    noiseMaskFrames = int(noiseMaskDur *refreshRate)
    for i in range(noiseMaskFrames):
        noiseTexture = scipy.random.rand(128,128)*2.0-1
        myPatch1 = visual.GratingStim(myWin, tex=noiseTexture, pos=(wordEccentricity, 0),
            size=(5,2), units='deg',
            interpolate=False,
            autoLog=False)#this stim changes too much for autologging to be useful
        myPatch1.phase += (1 / 128.0, 0.5 / 128.0)  # increment by (1, 0.5) pixels per frame
        myPatch1.draw()
        myWin.flip()
    if task=='T1':
        respPromptStim.setText('What was the word?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
    return cuesPos,correctAnswerIdxsStream1,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx,wordListThis):
    """Score one response against the correct word and write the per-response
    fields (cuePos, answer, response, correct) tab-separated to dataFile.

    response: sequence of characters typed by the subject (replaced by
    responseAutopilot when autopilot or passThisTrial). correctAnswerIdx:
    index into wordListThis of the correct word. Comparison is done on
    uppercased, space-stripped strings. Returns 1 if exactly correct, else 0.
    """
    #Handle response, calculate whether correct, ########################################
    #responses are actual characters
    #correctAnswer is index into stimSequence
    #autopilot is global variable
    print("scoring response using wordList=",wordListThis, 'correctAnswerIdx=',correctAnswerIdx)
    if autopilot or passThisTrial:
        response = responseAutopilot
    #print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\nstimSequence=',stimSequence, '\nwords=',wordList)
    correct = 0
    #approxCorrect = 0
    #posOfResponse = -999
    #responsePosRelative = -999
    correctAnswer = wordListThis[correctAnswerIdx].upper()
    responseString= ''.join(['%s' % char for char in response])
    responseString= responseString.upper()
    responseString = responseString.replace(" ", "") #delete spaces
    correctAnswer = correctAnswer.replace(" ", "") #delete spaces
    print('correctAnswer="',correctAnswer ,'" responseString="',responseString,'"',sep='')
    if correctAnswer == responseString:
        correct = 1
    #print('correct=',correct)
    responseWordIdx = wordToIdx(responseString,wordListThis)
    #the block below (position-relative "approximately correct" scoring) is
    #deliberately disabled; kept for possible reinstatement
    #if responseWordIdx is None: #response is not in the wordList
    #    posOfResponse = -999
    #    logging.warn('Response was not present in the stimulus stream')
    #else:
    #    posOfResponse= np.where( responseWordIdx==stimSequence )
    #    posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence
    #    if len(posOfResponse) > 1:
    #        logging.error('Expected response to have occurred in only one position in stream')
    #    posOfResponse = posOfResponse[0] #first element of list (should be only one element long
    #    responsePosRelative = posOfResponse - cuePos
    #    approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus/minus
    #print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\nposOfResponse = ',posOfResponse) #debugON
    #print response stuff to dataFile
    #header was answerPos0, answer0, response0, correct0, responsePosRelative0
    print(cuePos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    # print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
    return correct
 #end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Auditory feedback: a high tone for a correct response, a low tone for an
    incorrect one, and an alternating mid/low pattern when the trial was passed
    (skipped). Sounds are created fresh on every call."""
    highTone = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
    lowTone = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
    highTone.setVolume(0.9)
    lowTone.setVolume(1.0)
    if correct:
        highTone.play()
        return
    if passThisTrial:
        midTone = sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            midTone.play()
            lowTone.play()
        return
    lowTone.play()  #incorrect
#flags controlling the main trial loop that follows
expStop=False  #set True when the subject aborts with ESC during response collection
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
#create the staircase handler
useQuest = True
if useQuest:
staircase = data.QuestHandler(startVal = 95,
startValSd = 80,
stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
nTrials = staircaseTrials,
#extraInfo = thisInfo,
pThreshold = threshCriterion, #0.25,
gamma = 1./26,
delta=0.02, #lapse rate, I suppose for Weibull function fit
method = 'quantile', #uses the median of the posterior as the final answer
stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
minVal=1, maxVal = 100
)
print('created QUEST staircase')
else:
stepSizesLinear = [.2,.2,.1,.1,.05,.05]
stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
staircase = data.StairHandler(startVal = 0.1,
stepType = 'log', #if log, what do I want to multiply it by
stepSizes = stepSizesLog, #step size to use after each reversal
minVal=0, maxVal=1,
nUp=1, nDown=3, #will home in on the 80% threshold
nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
nTrials=1)
print('created conventional staircase')
if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
print(phasesMsg); logging.info(phasesMsg)
#staircaseStarterNoise PHASE OF EXPERIMENT
corrEachTrial = list() #only needed for easyStaircaseStarterNoise
staircaseTrialN = -1; mainStaircaseGoing = False
while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
staircaseTrialN += 1
noisePercent = prefaceStaircaseNoise[staircaseTrialN]
else:
if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
mainStaircaseGoing = True
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
try: #advance the staircase
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
staircaseTrialN += 1
except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
break #break out of the trials loop
#print('staircaseTrialN=',staircaseTrialN)
if thisTrial['wordList'] == 1:
wordList = wordList1
else: wordList = wordList2
idxsStream1 = calcAndPredrawStimuli(wordList)
correctAnswerIdxsStream1, ts = \
do_RSVP_stim(cuePos, idxsStream1, noisePercent/100.,staircaseTrialN)
numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
expStop,passThisTrial,responses,responsesAutopilot = \
stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
if not expStop:
if mainStaircaseGoing:
print('staircase\t', end='', file=dataFile)
else:
print('staircase_preface\t', end='', file=dataFile)
#header start 'trialnum\tsubject\ttask\t'
print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx,wordList )
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
core.wait(.06)
if feedback:
play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
corrEachTrial.append(T1approxCorrect)
if mainStaircaseGoing:
staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
#print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
#ENDING STAIRCASE PHASE
if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
print('framesSaved after staircase=',framesSaved) #debugON
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = ('prefaceStaircase phase' if expStop else '')
msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
logging.info(msg); print(msg)
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
#print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
threshNoise = round(staircase.quantile(),3)
if descendingPsycho:
threshNoise = 100- threshNoise
threshNoise = max( 0, threshNoise ) #e.g. ff get all trials wrong, posterior peaks at a very negative number
msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
logging.info(msg); print(msg)
myWin.close()
#Fit and plot data
fit = None
try:
intensityForCurveFitting = staircase.intensities
if descendingPsycho:
intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
except:
print("Fit failed.")
plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
#save figure to file
pylab.savefig(fileName+'.pdf')
print('The plot has been saved, as '+fileName+'.pdf')
pylab.show() #must call this to actually show plot
else: #not staircase
    #MAIN PHASE: run the full trial list at a fixed noise level.
    #NOTE(review): indentation of this section was reconstructed (source arrived flattened); verify nesting against the original file.
    noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
    print(phasesMsg); logging.info(phasesMsg)
    nDoneMain =0
    while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
        if nDoneMain==0:
            msg='Starting main (non-staircase) part of experiment'
            logging.info(msg); print(msg)
        #NOTE(review): trials.next() is Python-2-only iterator syntax; Python 3 would need next(trials).
        thisTrial = trials.next() #get a proper (non-staircase) trial
        if thisTrial['wordList'] == 1:
            wordList = wordList1
        else: wordList = wordList2
        print("drawing stimuli using wordList=",wordList)
        sequenceStream1 = calcAndPredrawStimuli(wordList)
        cuesPos,correctAnswerIdxsStream1, ts = \
            do_RSVP_stim(thisTrial, sequenceStream1, noisePercent/100.,nDoneMain)
        numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
        #call for each response
        expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
        numCharsInResponse = len(wordList[0])
        dL = [None]*numRespsWanted #dummy list for null values
        expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
        responseOrder = [0] #only the response at position 0 is actually collected below
        for i in responseOrder:
            x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
            expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
                                          numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
                                          requireAcceptance,autopilot,responseDebug=True)
        expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
        if not expStop:
            print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
            print(nDoneMain,'\t', end='', file=dataFile)
            print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
            print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
            i = 0
            eachCorrect = np.ones(numRespsWanted)*-999 #-999 flags a response that was never scored
            for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
                sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
                print("about to score response using wordList=",wordList)
                #NOTE(review): if numRespsWanted > 1, responses[1] is still None here (only responseOrder=[0] was collected) -- verify.
                correct = (
                        handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs,wordList ) )
                eachCorrect[i] = correct
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            print('correct=',correct,'eachCorrect=',eachCorrect)
            #NOTE(review): numTrialsCorrect / numTrialsEachCorrect are not initialized anywhere in this chunk -- confirm they are zeroed earlier in the file.
            numTrialsCorrect += eachCorrect.all() #so count -1 as 0
            numTrialsEachCorrect += eachCorrect #list numRespsWanted long
            if exportImages: #catches one frame of response
                myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
                framesSaved +=1; core.wait(.1)
                myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
                expStop=True
            core.wait(.1)
            if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            nDoneMain+=1
            dataFile.flush(); logging.flush()
            print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
            if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                        ( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
                nextText.setText('Press "SPACE" to continue!')
                nextText.draw()
                progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
                NextRemindCountText.setText(progressMsg)
                NextRemindCountText.draw()
                myWin.flip() # myWin.flip(clearBuffer=True)
                waiting=True
                while waiting:
                    if autopilot: break
                    elif expStop == True:break
                    for key in event.getKeys(): #check if pressed abort-type key
                        if key in ['space','ESCAPE']:
                            waiting=False
                        if key in ['ESCAPE']:
                            expStop = True
                myWin.clearBuffer()
                core.wait(.2); time.sleep(.2) #NOTE(review): nesting of this pause reconstructed; confirm against the original
    #end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
    msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
    print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
    print('Of ',nDoneMain,' trials, on ',numTrialsCorrect*1.0/nDoneMain*100., '% of all trials all targets reported exactly correct',sep='')
    for i in range(numRespsWanted):
        print('stream',i,': ',round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2), '% correct',sep='')
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
| {
"repo_name": "alexholcombe/twoWords",
"path": "RansleySingleshotVersion/Justine/Hubert_18May2015_14-26.py",
"copies": "2",
"size": "49209",
"license": "mit",
"hash": 3745289546652181500,
"line_mean": 55.0870069606,
"line_max": 209,
"alpha_frac": 0.6636387653,
"autogenerated": false,
"ratio": 3.5783158813263527,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5241954646626352,
"avg_score": null,
"num_lines": null
} |
#Alex Holcombe alex.holcombe@sydney.edu.au
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
wordEccentricity=3
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.; #100
if demo:
refreshRate = 60.; #100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 11
# wordsUnparsed="the, and, for, you, say, but, his, not, she, can, who, get, her, all, one, out, see, him, now, how, its, our, two, way"#24 most common words
# lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
#High freq word list
wordsUnparsedHigh = "high, high, high, high, high, high, high, high, high, high, high, high"
wordsUnparsedHigh = wordsUnparsedHigh.upper()
wordListHigh = wordsUnparsedHigh.split(",") #split into list
for i in range(len(wordListHigh)):
wordListHigh[i] = wordListHigh[i].replace(" ", "") #delete spaces
#Low frequency word list
wordsUnparsedLow="low, low, low, low, low, low, low, low, low, low, low, low"
wordsUnparsedLow = wordsUnparsedLow.upper()
wordListLow = wordsUnparsedLow.split(",") #split into list
for i in range(len(wordListLow)):
wordListLow[i] = wordListLow[i].replace(" ", "") #delete spaces
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=0
framesSaved=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg
SOAms = 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
print('trialdurframes is' + str(trialDurFrames))
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
    """(Re)open the stimulus window using the module-level monitor/size/color settings and return it."""
    myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
    return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 50 #default value
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
print('staircaseTrials entered by user=',staircaseTrials)
logging.info('staircaseTrials entered by user=',staircaseTrials)
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
else: #not doing staircase
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition =',trialsPerCondition)
defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(wordList1, wordList2):
    """Pre-build the TextStim objects for the left (stream 1) and right (stream 2)
    RSVP streams and return two shuffled index sequences into the word list.

    wordList1, wordList2 -- lists of word strings for the left / right stream.
    Returns (idxsStream1, idxsStream2): numpy index arrays (length len(wordList1)-1, see note below).
    Side effect: clears and refills the module-level textStimuliStream1/2 lists in place.
    """
    global textStimuliStream1, textStimuliStream2
    del textStimuliStream1[:] #clear in place so existing references to these lists stay valid
    del textStimuliStream2[:]
    print('textStimuliStream1 at start of calcAndPredrawStimuli = ', textStimuliStream1)
    if len(wordList1) + len(wordList2)< numWordsInStream: #warn only; does not abort
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    #NOTE(review): len(wordList1)-1 yields indices 0..len-2, omitting the last word -- looks like an off-by-one; confirm intent.
    idxsIntoWordList = np.arange(len(wordList1)-1) #create a list of indexes of the entire word list
    print('wordList=',wordListLow + wordListHigh)
    for i in range(12): #draw the words that will be used on this trial, the first 26 of the shuffled list
        wordleft = wordList1[i]
        wordright = wordList2[i]
        # #[ idxsIntoWordList[i] ]
        textStimulusStream1 = visual.TextStim(myWin,text=wordleft,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream2 = visual.TextStim(myWin,text=wordright,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-wordEccentricity,0]) #left
        textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
        textStimulusStream2.setPos([wordEccentricity,0]) #right
        textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli
    #NOTE(review): idxsStream1 aliases idxsIntoWordList, so the in-place shuffle on the next line also shuffles idxsStream1 -- confirm that is intended.
    idxsStream1 = idxsIntoWordList #first RSVP stream
    np.random.shuffle(idxsIntoWordList)
    idxsStream2 = copy.deepcopy(idxsIntoWordList)
    np.random.shuffle(idxsStream2)
    return idxsStream1, idxsStream2
#create click sound for keyboard
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
#Fixation: a noise patch that counterphase-flickers each refresh, so missed frames / broken fixation are conspicuous.
if showRefreshMisses:
    fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
#Integer division so the texture dimensions are ints: fixSizePix/4 is a float under Python 3,
#which np.random.rand rejects; // gives the same value Python 2's / did here.
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix//4,fixSizePix//4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=4,units='pix',autoLog=autoLogging)
#Response-collection stimuli: prompt below center, accept instructions below that, typed echo at center.
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False #if True, participant must hit ENTER to accept each response
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS: full crossing of cue position x response order x (single) flip state x word-frequency condition
cuePositions = np.array([6,7,8,9,10]) # [4,10,16,22] used in Martini E2, group 2
for cuePos in cuePositions:
    for rightResponseFirst in [False,True]:
        for bothWordsFlipped in [False]:
            for condition in ['HLLR', 'LLHR', 'LLLR', 'HLHR']:
                stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst,
                                          'leftStreamFlip':bothWordsFlipped, 'rightStreamFlip':bothWordsFlipped, 'condition' : condition} )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
#print('N trials is' + str(len(trials)))
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
    ' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
    """Map an integer 0-25 to the corresponding uppercase letter.

    Anything outside that range, or a value chr() cannot handle
    (e.g. a float), yields the placeholder '@'.
    """
    if 0 <= number <= 25:
        try:
            return chr(ord('A') + number)
        except Exception: #e.g. non-integer slipped through the range check
            return '@'
    return '@'
def letterToNumber(letter): #A = 0, Z = 25
    """Map an uppercase letter 'A'-'Z' to 0-25.

    Anything else -- lowercase, multi-character strings, non-strings --
    yields the sentinel -999.
    """
    try:
        if 'A' <= letter <= 'Z':
            return ord(letter) - ord('A') #single char in range
        return -999
    except Exception: #comparison or ord() failed (non-string / multi-char)
        return -999
def wordToIdx(word,wordList):
    """Return the index of the first wordList entry whose uppercase form equals word.

    Returns None when no entry matches; assumes word is already uppercase.
    Any unexpected failure (e.g. a non-string list entry) is reported and
    also yields None.
    """
    try:
        for position, entry in enumerate(wordList):
            if entry.upper() == word:
                return position
        return None #no match found
    except Exception:
        print('Unexpected error in wordToIdx with word=',word)
        return None
#print header for data file: one tab-separated column name per field written on each trial line
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
    numRespsWanted = 2 #one response per stream
    dataFile.write('rightResponseFirst\t')
    for i in range(numRespsWanted):
        dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
        dataFile.write('answer'+str(i)+'\t')
        dataFile.write('response'+str(i)+'\t')
        dataFile.write('correct'+str(i)+'\t')
        dataFile.write('responsePosRelative'+str(i)+'\t')
    print('condition\t',end='',file=dataFile)
    print('timingBlips',file=dataFile) #ends the header line
#end of header
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
        noise,proportnNoise,allFieldCoords,numNoiseDots ):
    """Draw frame n of the dual-stream RSVP trial; returns True so the caller keeps flipping.

    n -- frame number within the trial; seq1/seq2 -- index sequences into the pre-drawn
    textStimuliStream1/2 lists; cue -- the circle stimulus flashed around fixation.
    Reads module-level bgColor/cueColor/letterColor.
    """
    #defining a function to draw each frame of stim.
    #seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
    SOAframes = letterDurFrames+ISIframes
    cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
    stimN = int( np.floor(n/SOAframes)) #which serial position in the stream this frame belongs to
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
    #print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
    thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
    #NOTE(review): if seq2 is None, thisStim2Idx is never bound yet is used unconditionally below -- NameError; confirm seq2 is always provided.
    if seq2 is not None:
        thisStim2Idx = seq2[stimN]
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    cue.setLineColor( bgColor )
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #cheTck whether it's time for any cue
        if n>=cueFrame and n<cueFrame+cueDurFrames:
            cue.setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
        textStimuliStream2[thisStim2Idx].setColor( letterColor )
    else: #during the ISI draw in background color, so timing cost is identical every frame
        textStimuliStream1[thisStimIdx].setColor( bgColor )
        textStimuliStream2[thisStim2Idx].setColor( bgColor )
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    textStimuliStream2[thisStim2Idx].draw()
    cue.draw()
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0: #refresh the noise field once per letter, not per frame
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0:
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
#The cue is a circle drawn around the stream position; it is hidden by drawing it in bgColor (see oneFrameOfStim)
cue = visual.Circle(myWin,
                 radius=cueRadius,#Martini used circles with diameter of 12 deg
                 lineColorSpace = 'rgb',
                 lineColor=bgColor,
                 lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
                 units = 'deg',
                 fillColorSpace = 'rgb',
                 fillColor=None, #beware, with convex shapes fill colors don't work
                 pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
                 interpolate=True,
                 autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0 #noise field is the same width as the letter height
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Check one trial's frame times for skipped refreshes and log any found.

    ts is the list of clock times recorded after each frame flip; trialN is used
    only in the log message.  Relies on the globals refreshRate, demo and logging.
    Returns the number of frames whose duration exceeded the tolerance — the
    value recorded as "timingBlips" in the data file.
    """
    interframeIntervs = np.diff(ts)*1000  #convert s to ms
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded tolerated duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong >0 and (not demo):
        #Build one message listing every long frame. (A previous version had an
        #unreachable `if demo:` branch here — demo is already excluded by the guard above.)
        longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
        longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
            str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
        logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
        flankingAlso=list()
        for idx in idxsInterframeLong: #also log timing of one frame before and one after each long frame
            if idx-1>=0:
                flankingAlso.append(idx-1)
            else: flankingAlso.append(np.nan) #np.nan: the np.NaN alias was removed in NumPy 2.0
            flankingAlso.append(idx)
            if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
            else: flankingAlso.append(np.nan)
        flankingAlso = np.array(flankingAlso)
        #was np.negative(np.isnan(...)) — numeric negation of a boolean array raises TypeError in modern NumPy
        flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
        flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts; astype(np.integer) is deprecated
        #Logged as INFO (not an additional error) so it won't fill the console when console set to WARNING or higher
        logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) )
    return numCasesInterframeLong
#end timing check
trialClock = core.Clock()
numTrialsCorrect = 0;  #trials on which every response was exactly correct
numTrialsApproxCorrect = 0;  #trials on which every response was within 3 serial positions of the cued item (see handleAndScoreResponse)
numTrialsEachCorrect= np.zeros( numRespsWanted )  #per-stream exact-correct counts
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )  #per-stream approximately-correct counts
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
    """Present one dual-stream RSVP trial: fixation interval, then the stimulus loop.

    Returns (cuesPos, correctAnswerIdxsStream1, correctAnswerIdxsStream2, ts) where
    ts is the per-frame time list later checked by timingCheckAndLog.
    """
    #relies on global variables:
    #   textStimuli, logging, bgColor
    #  thisTrial should have 'cuePos'
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesPos = [] #will contain the positions in the stream of all the cues (targets)
    cuesPos.append(thisTrial['cuePos'])
    cuesPos = np.array(cuesPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    cue.setLineColor(bgColor)
    preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    #NOTE(review): rand(1)/2.+0.3 gives 0.3-0.8 s, not "800ms and 1.3s" as the original comment claimed — confirm intended range
    fixatnPeriodFrames = int(   (np.random.rand(1)/2.+fixatnPeriodMin)   *refreshRate)  #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20): #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        #      fixation.draw()
        #else: fixationBlank.draw()
        fixationPoint.draw()
        myWin.flip() #end fixation interval
    #myWin.setRecordFrameIntervals(True);  #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
            noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0; ts.append(t); #record frame time for the timing check
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    if task=='T1':
        respPromptStim.setText('What was circled?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
    #NOTE(review): seq2 is indexed unguarded here, unlike in oneFrameOfStim which checks `seq2 is not None` — confirm seq2 is always provided
    correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
    #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
    return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(condition, passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx, rsp_n):
    """Score one response and write its columns to dataFile.

    response is the typed characters (actual characters, not indices).
    stimSequence is the stream's sequence of word-list indices; correctAnswerIdx
    indexes into the condition's word list.  rsp_n: 0 = left stream, 1 = right stream.
    Relies on the globals autopilot, wordListHigh, wordListLow, wordToIdx, logging,
    dataFile and np.  Returns (correct, approxCorrect, responsePosRelative).
    """
    #Handle response, calculate whether correct, ########################################
    if autopilot or passThisTrial:
        response = responseAutopilot
    correct = 0
    approxCorrect = 0
    posOfResponse = -999
    responsePosRelative = -999
    idx = correctAnswerIdx
    #The condition code names the word list of each stream, e.g. 'HLLR' = High-frequency
    #list on the Left, Low-frequency list on the Right.  The same list must be used both
    #to look up the correct answer and to look up the response, so map condition->lists
    #once instead of the two duplicated if/elif chains the original had.
    listsByCondition = {
        'HLLR': (wordListHigh, wordListLow),
        'LLHR': (wordListLow,  wordListHigh),
        'LLLR': (wordListLow,  wordListLow),
        'HLHR': (wordListHigh, wordListHigh),
    }
    thisList = listsByCondition[condition][rsp_n] #unknown condition now raises KeyError (previously fell through to an obscure NameError)
    correctAnswer = thisList[idx]
    responseString= ''.join(['%s' % char for char in response])
    responseString= responseString.upper()
    if correctAnswer == responseString:
        correct = 1
    responseWordIdx = wordToIdx(responseString, thisList)
    if responseWordIdx is None: #response is not in the wordList
        posOfResponse = -999
        logging.warn('Response was not present in the stimulus stream')
    else:
        posOfResponse= np.where( responseWordIdx==stimSequence )
        posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence
        if len(posOfResponse) > 1:
            logging.error('Expected response to have occurred in only one position in stream')
        posOfResponse = posOfResponse[0] #first element of list (should be only one element long)
        responsePosRelative = posOfResponse - cuePos
        approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus/minus
    #print response stuff to dataFile; header was cuePos0, answer0, response0, correct0, responsePosRelative0
    print(cuePos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
    return correct,approxCorrect,responsePosRelative
#end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Auditory feedback: high tone for a correct trial, low tone for an
    incorrect one, and an alternating high/low pair when the trial was passed."""
    toneHigh = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
    toneLow = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
    toneHigh.setVolume(0.9)
    toneLow.setVolume(1.0)
    if correct:
        toneHigh.play()
    elif passThisTrial:
        tonePass = sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            tonePass.play()
            toneLow.play()
    else:
        #incorrect
        toneLow.play()
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
#create the staircase handler
useQuest = True
if useQuest:
staircase = data.QuestHandler(startVal = 95,
startValSd = 80,
stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
nTrials = staircaseTrials,
#extraInfo = thisInfo,
pThreshold = threshCriterion, #0.25,
gamma = 1./26,
delta=0.02, #lapse rate, I suppose for Weibull function fit
method = 'quantile', #uses the median of the posterior as the final answer
stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
minVal=1, maxVal = 100
)
print('created QUEST staircase')
else:
stepSizesLinear = [.2,.2,.1,.1,.05,.05]
stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
staircase = data.StairHandler(startVal = 0.1,
stepType = 'log', #if log, what do I want to multiply it by
stepSizes = stepSizesLog, #step size to use after each reversal
minVal=0, maxVal=1,
nUp=1, nDown=3, #will home in on the 80% threshold
nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
nTrials=1)
print('created conventional staircase')
if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
print(phasesMsg); logging.info(phasesMsg)
#staircaseStarterNoise PHASE OF EXPERIMENT
corrEachTrial = list() #only needed for easyStaircaseStarterNoise
staircaseTrialN = -1; mainStaircaseGoing = False
while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
staircaseTrialN += 1
noisePercent = prefaceStaircaseNoise[staircaseTrialN]
else:
if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
mainStaircaseGoing = True
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
try: #advance the staircase
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
staircaseTrialN += 1
except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
break #break out of the trials loop
#print('staircaseTrialN=',staircaseTrialN)
if thisTrial['condition']=='HLHR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListHigh, wordListHigh)
elif thisTrial['condition']=='LLHR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListLow, wordListHigh)
elif thisTrial['condition']=='HLLR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListHigh, wordListLow)
elif thisTrial['condition']=='LLHR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListLow, wordListLow)
        #NOTE(review): cuePos, idxsStream1 and idxsStream2 are not defined in this staircase
        #phase (the main loop passes thisTrial, sequenceStream1, sequenceStream2) — this call
        #would raise NameError; this branch looks stale relative to the main loop. Verify.
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(cuePos, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
        numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        expStop,passThisTrial,responses,responsesAutopilot = \
            stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
                requireAcceptance,autopilot,responseDebug=True)
        if not expStop:
            if mainStaircaseGoing:
                print('staircase\t', end='', file=dataFile)
            else:
                print('staircase_preface\t', end='', file=dataFile)
            #header start 'trialnum\tsubject\ttask\t'
            print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
            print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            #NOTE(review): responseAutopilot (singular), sequenceLeft and correctAnswerIdx are
            #not defined in this scope, and the rsp_n argument of handleAndScoreResponse is
            #missing from this call (the main loop passes 9 arguments) — verify.
            correct,approxCorrect,responsePosRelative= handleAndScoreResponse(thisTrial['condition'],
                passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx )
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            core.wait(.06)
            if feedback:
                play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            #NOTE(review): T1approxCorrect is never assigned in this loop (approxCorrect is) —
            #the lines below would raise NameError; verify.
            print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
            corrEachTrial.append(T1approxCorrect)
            if mainStaircaseGoing:
                staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
                #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
#ENDING STAIRCASE PHASE
if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
print('framesSaved after staircase=',framesSaved) #debugON
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = ('prefaceStaircase phase' if expStop else '')
msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
logging.info(msg); print(msg)
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
#print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
threshNoise = round(staircase.quantile(),3)
if descendingPsycho:
threshNoise = 100- threshNoise
threshNoise = max( 0, threshNoise ) #e.g. ff get all trials wrong, posterior peaks at a very negative number
msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
logging.info(msg); print(msg)
myWin.close()
#Fit and plot data
fit = None
try:
intensityForCurveFitting = staircase.intensities
if descendingPsycho:
intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
except:
print("Fit failed.")
plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
#save figure to file
pylab.savefig(fileName+'.pdf')
print('The plot has been saved, as '+fileName+'.pdf')
pylab.show() #must call this to actually show plot
else: #not staircase
noisePercent = defaultNoiseLevel
phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
print(phasesMsg); logging.info(phasesMsg)
nDoneMain =0
while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
if nDoneMain==0:
msg='Starting main (non-staircase) part of experiment'
logging.info(msg); print(msg)
thisTrial = trials.next() #get a proper (non-staircase) trial
print('thisTrial condition is' + thisTrial['condition'])
if thisTrial['condition']=='HLHR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListHigh, wordListHigh)
elif thisTrial['condition']=='LLHR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListLow, wordListHigh)
elif thisTrial['condition']=='HLLR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListHigh, wordListLow)
elif thisTrial['condition']=='LLLR':
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordListLow, wordListLow)
cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
#call for each response
expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
numCharsInResponse = 4
dL = [None]*numRespsWanted #dummy list for null values
expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
responseOrder = range(numRespsWanted)
if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
responseOrder.reverse()
for i in responseOrder:
x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
if not expStop:
print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
print(nDoneMain,'\t', end='', file=dataFile)
print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
i = 0
eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
if i==0:
sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
correct,approxCorrect,responsePosRelative = (
handleAndScoreResponse(thisTrial['condition'], passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs, i) )
eachCorrect[i] = correct
eachApproxCorrect[i] = approxCorrect
print(thisTrial['condition'], '\t', end='', file=dataFile)
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
numTrialsCorrect += eachCorrect.all() #so count -1 as 0
numTrialsApproxCorrect += eachApproxCorrect.all()
numTrialsEachCorrect += eachCorrect #list numRespsWanted long
numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
if exportImages: #catches one frame of response
myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
framesSaved +=1; core.wait(.1)
myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
expStop=True
core.wait(.1)
if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
nDoneMain+=1
dataFile.flush(); logging.flush()
print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
nextText.setText('Press "SPACE" to continue!')
nextText.draw()
progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
NextRemindCountText.setText(progressMsg)
NextRemindCountText.draw()
myWin.flip() # myWin.flip(clearBuffer=True)
waiting=True
while waiting:
if autopilot: break
elif expStop == True:break
for key in event.getKeys(): #check if pressed abort-type key
if key in ['space','ESCAPE']:
waiting=False
if key in ['ESCAPE']:
expStop = True
myWin.clearBuffer()
core.wait(.2); time.sleep(.2)
#end main trials loop
#Wrap-up: log finish time, report abort if any, then print the accuracy summary
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
    msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
    print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
    #Final accuracy summary, overall and then per stream (stream0=left, stream1=right)
    msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
    print(msg); logging.info(msg)
    msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
    print(msg); logging.info(msg)
    for i in range(numRespsWanted):
        msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
        print(msg); logging.info(msg)
        msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
        print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
| {
"repo_name": "alexholcombe/twoWords",
"path": "Charlie/twoWords.py",
"copies": "3",
"size": "52843",
"license": "mit",
"hash": -8010308503823285000,
"line_mean": 55.9418859649,
"line_max": 209,
"alpha_frac": 0.6681490453,
"autogenerated": false,
"ratio": 3.5582115682445625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02795297312900653,
"num_lines": 912
} |
#Alex Holcombe alex.holcombe@sydney.edu.au
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function #use python3 style print
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
from copy import deepcopy
import copy
import time, sys, os, pylab, random, string
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
try:
import letterLineupResponse
except ImportError:
print('Could not import letterLineupResponse.py (you need that file to be in the same directory)')
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A NEW MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True.
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.; #100
if demo:
refreshRate = 60.; #100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 26 #Experiment will only work if all 26 letters are presented, otherwise error when you pick a letter that was not presented
wordsUnparsed="a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z"
wordList = wordsUnparsed.split(",") #split into list
for i in range(len(wordList)):
wordList[i] = wordList[i].replace(" ", "") #delete spaces
if len(wordList) > numWordsInStream:
print("WARNING: you have asked for streams that have more stimuli than are in the wordList, so some will be duplicated")
#Later on, a list of indices into this list will be randomly permuted for each trial
print(wordList)
print(len(wordList))
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 3 #6 deg in Goodbourn & Holcombe
widthPix= 1920 #1280 #monitor width in pixels of Agosta
heightPix= 1080 #800 #monitor height in pixels
monitorwidth = 52.2 #38.7 #monitor width in cm
scrn=1 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=0
framesSaved=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
#Load the click sound used for responses; fall back to a synthesized click if the file is missing.
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except Exception: #was a bare except, which would also swallow KeyboardInterrupt/SystemExit
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg
SOAms = 100 # 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow():
    """Open the stimulus window with the standard monitor/colour settings.

    Wrapped in a function because the window has to be opened several times and
    must be configured identically each time.
    """
    newWindow = visual.Window(monitor=mon, size=(widthPix,heightPix), allowGUI=allowGUI,
                              units=units, color=bgColor, colorSpace='rgb', fullscr=fullscr,
                              screen=scrn, waitBlanking=waitBlank) #Holcombe lab monitor
    return newWindow
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
    refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
    refreshRateWrong = False
else: #checkRefreshEtc
    # Query the machine and measure the actual refresh rate via PsychoPy's RunTimeInfo.
    runInfo = psychopy.info.RunTimeInfo(
            # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
            #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
            #version="<your experiment version info>",
            win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=True, ## True means report on everything
            userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            )
    #print(runInfo)
    logging.info(runInfo)
    print('Finished runInfo- which assesses the refresh and processes of this computer')
    #check screen refresh is what assuming it is ##############################################
    Hzs=list()
    myWin.flip(); myWin.flip();myWin.flip();myWin.flip();  #warm-up flips so startup frames don't pollute the fps estimate
    myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
    print('About to measure frame flips')
    for i in range(50):  #sample the frame rate over 50 flips
        myWin.flip()
        Hzs.append( myWin.fps() )  #varies wildly on successive runs!
    myWin.setRecordFrameIntervals(False)
    # end testing of screen refresh########################################################
    Hzs = np.array( Hzs );     Hz= np.median(Hzs)  #median is robust to occasional dropped frames
    msPerFrame= 1000./Hz
    refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
    refreshRateTolerancePct = 3  #measured rate may differ from assumed refreshRate by up to this percentage
    pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
    refreshRateWrong =  pctOff > (refreshRateTolerancePct/100.)
    if refreshRateWrong:
        refreshMsg1 += ' BUT'
        refreshMsg1 += ' program assumes ' + str(refreshRate)
        refreshMsg2 =  'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
    else:
        refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size  #remember the actual window resolution for the later sanity check
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
# ---- Experiment-settings dialog -------------------------------------------------
# Collect subject name and either staircase parameters or trials/noise level,
# then unpack what the user entered. Defaults below apply when fields are left blank.
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 10 #default value
dlgLabelsOrdered = list()  #records the order fields were added, so myDlg.data can be indexed by name
if doStaircase:
    myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
    myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
    myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
    dlgLabelsOrdered.append('subject')
if doStaircase:
    easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
    myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
    dlgLabelsOrdered.append('easyTrials')
    myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
    dlgLabelsOrdered.append('staircaseTrials')
    pctCompletedBreak = 101  #effectively no rest break during the staircase phase
else:
    myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
    dlgLabelsOrdered.append('defaultNoiseLevel')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 20  #offer a rest break every 20% of trials
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
    myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
    msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
    myDlg.addText(msgWrongResolution, color='Red')
    logging.error(msgWrongResolution)
    print(msgWrongResolution)
dimGreyForDlgBox = 'DimGrey'
from distutils.version import LooseVersion
if LooseVersion(psychopy.__version__) < LooseVersion("1.84.2"):
    dimGreyForDlgBox = [-1.,1.,-1.] #color names stopped working along the way, for unknown reason
myDlg.addText('Note: to abort press ESC at a trials response screen', color=dimGreyForDlgBox) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information entered in dialogue box
    thisInfo = myDlg.data #this will be a list of data returned from each field added in order
    if not autopilot:
        name=thisInfo[dlgLabelsOrdered.index('subject')]
        if len(name) > 0: #if entered something
            subject = name #change subject default name to what user entered
    if doStaircase:
        if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
            staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
            print('staircaseTrials entered by user=',staircaseTrials)
            # BUGFIX: psychopy.logging.info takes a single message string; a second positional
            # argument is interpreted as the timestamp 't', so the value never reached the log.
            logging.info('staircaseTrials entered by user=' + str(staircaseTrials))
        if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
            prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
            print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
            logging.info('prefaceStaircaseTrialsN entered by user=' + str(prefaceStaircaseTrialsN))
    else: #not doing staircase
        trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
        print('trialsPerCondition=',trialsPerCondition)
        logging.info('trialsPerCondition =' + str(trialsPerCondition))
        defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
    print('User cancelled from dialog box.')
    logging.flush()
    core.quit()
if not demo:
    allowGUI = False
myWin = openMyStimWindow() #reopen stim window. Had to close test window to allow for dialogue boxes
#set up output data file, log file, copy of program code, and logging
infix = '' #part of the filenames
if doStaircase:
    infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
    dataFile = open(fileName+'.txt', 'w')
    # Archive the exact code that generated this subject's data alongside the data file.
    # NOTE(review): 'cp' assumes a POSIX shell; would fail silently on Windows.
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
    os.system(saveCodeCmd)  #save a copy of the code as it was when that subject was run
    logFname = fileName+'.log'
    ppLogF = logging.LogFile(logFname,
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
    dataFile = sys.stdout; logF = sys.stdout
    logging.console.setLevel(logging.ERROR)  #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
    # Record machine/environment details in the log for reproducibility of this run.
    runInfo = psychopy.info.RunTimeInfo(
            # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
            #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
            #version="<your experiment version info>",
            win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=False, ## True means report on everything
            userProcsDetailed=True,  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            #randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
                ## None -> default
                ## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
                ##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
                ##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
            )
    logging.info(runInfo)
logging.flush()
def detectDuplicates(myList):
    """Return True when myList contains any repeated value, otherwise False.

    A set drops duplicates, so a shorter set means at least one repeat.
    """
    return len(set(myList)) < len(myList)
def readFileAndScramble(numWordsInStream):
    """Read bigrams from a stimulus file and reshuffle until the first letters and
    second letters (of the first numWordsInStream bigrams) each contain no duplicates.

    Returns (firstLetters, secondLetters), two lists of single characters.
    NOTE(review): the file path is hard-coded; raises IOError if the file is missing.
    May loop for a long time if the bigram list cannot satisfy the no-duplicates constraint.
    """
    #Abandoning use of this for Cheryl's experiment because too hard to find enough non-word bigrams for which letters not repeated in either stream
    stimFile = 'wordStimuliGeneration/twoLetters-Cheryl.txt'
    stimListFile= open(stimFile)
    bigramList = [x.rstrip() for x in stimListFile.readlines()]
    print('Read in', len(bigramList), 'strings')
    #print('bigramList = ',bigramList)
    stimListFile.close()
    #Scramble
    shuffled = deepcopy(bigramList)
    shuffleUntilNoDuplicatesOfFirstOrSecondLetter = True
    duplicates = True #intiialise this as true so the loop will run at least once
    while shuffleUntilNoDuplicatesOfFirstOrSecondLetter and duplicates:
        random.shuffle(shuffled)
        #print('first 10 unshuffled=',bigramList[:10])
        #print('first 10 shuffled=',shuffled[:10])
        #Break into two: first letter of each bigram and second letter of each bigram
        firstLetters = list()
        secondLetters = list()
        for bigram in shuffled[:numWordsInStream]:
            firstLetter = bigram[0]
            secondLetter = bigram[1]
            firstLetters.append( firstLetter )
            secondLetters.append ( secondLetter )
        print("shuffled firstLetters=",firstLetters," secondLetters=",secondLetters)
        # Loop again (reshuffle) if either stream would contain a repeated letter
        duplicates = detectDuplicates(firstLetters)
        if not duplicates:
            duplicates = detectDuplicates(secondLetters)
    print('first 20 shuffled firstLetters=',firstLetters[:20])
    print('first 20 shuffled secondLetters=',secondLetters[:20])
    return firstLetters, secondLetters
def findLtrInList(letter,wordList):
    """Return the index of `letter` within `wordList`, or None if it is absent.

    BUGFIX: the original left `idx` unassigned when the letter was not found,
    so the final `return idx` raised UnboundLocalError right after printing the
    error message. `idx` is now initialised to None so callers get None instead.
    """
    idx = None
    try:
        idx = wordList.index(letter)
    except ValueError:
        print("Error! ", letter," not found in wordList")
    except Exception as e:
        print('Unexpected error',e)
    #print("Searched for ",letter," in the wordList and index returned was ",idx)
    return idx
def calcSequenceForThisTrial():
    """Determine the order of stimuli (as indices into the global wordList) for
    the two RSVP streams of one trial.

    Returns (idxsStream1, idxsStream2), each numWordsInStream long.
    Relies on globals: wordList, numWordsInStream.
    NOTE(review): np.random.shuffle(range(...)) requires Python 2, where range()
    returns a list (this file uses xrange elsewhere, consistent with Python 2).
    """
    print("lenWordlist",len(wordList))
    idxsIntoWordList = range(len(wordList)) #create a list of indexes of the entire word list: 0,1,2,3,4,5,...23
    print("idxsInto",idxsIntoWordList)
    readFromFile = False  #hard-coded off: the file-based route was abandoned (see comment below)
    if readFromFile:
        #read in the file of list of bigrams. Doesn't work because too hard to find enough non-word bigrams for which letters not repeated in either stream
        firstLetters, secondLetters = readFileAndScramble(numWordsInStream)
        #Now must determine what indexes into the wordList (list of letters pre-drawn) correspond to these
        idxsStream1 = list()
        print("idxsStream1FirstTime",idxsStream1)
        idxsStream2 = list()
        print("idxsStream2FirstTime",idxsStream2)
        for ltri in range(numWordsInStream): #Find where in the "wordList" each letter is, add it to idxsStream1
            letter = firstLetters[ltri]
            idx = findLtrInList(letter, wordList)
            idxsStream1.append(idx)
            print("idxsStream1SecondTime",idxsStream1)
        #print("final idxsStream1=",idxsStream1)
        for ltri in range(numWordsInStream): #Find where in the "wordList" each letter is, add it to idxsStream1
            letter = secondLetters[ltri]
            idx = findLtrInList(letter, wordList)
            idxsStream2.append(idx)
            print("idxsStream2SecondTime",idxsStream2)
    else: #if not readFromFile: #just create a shuffled index of all the possibilities
        np.random.shuffle(idxsIntoWordList) #0,1,2,3,4,5,... -> randomly permuted 3,2,5,...
        print("idxsintoWordList",idxsIntoWordList)
        idxsStream1 = copy.deepcopy(idxsIntoWordList) #first RSVP stream
        idxsStream1= idxsStream1[:numWordsInStream] #take the first numWordsInStream of the shuffled list
        idxsStream2 = copy.deepcopy(idxsIntoWordList) #make a copy for the right stream, and permute them on the next list
        np.random.shuffle(idxsStream2)
        idxsStream2= idxsStream2[:numWordsInStream] #take the first numWordsInStream of the shuffled list
        print("idxsStream1",idxsStream1)
        print("idxsStream2",idxsStream2)
    return idxsStream1, idxsStream2
# Buckets of pre-drawn TextStims, one per word; emptied and refilled before every trial by calcAndPredrawStimuli
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(wordList,cues, preCues,thisTrial): #Called before each trial
    """Position the cues/precues for this trial, pre-create one TextStim per word
    for each of the two streams, and compute the stimulus order.

    Returns (idxsStream1, idxsStream2, cues, preCues).
    Mutates the module-level lists textStimuliStream1/textStimuliStream2 in place.
    thisTrial must contain 'wordEccentricity' (deg).
    """
    #textStimuliStream1 and 2 assumed to be global variables
    if len(wordList) < numWordsInStream:
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    #print('wordList=',wordList)
    textStimuliStream1[:] = [] #Delete all items in the list
    textStimuliStream2[:] = [] #Delete all items in the list
    for i in xrange( len(cues) ):
        eccentricity = thisTrial['wordEccentricity']
        if eccentricity < 2: #kludge to deal with very low separation case where want just one cue - draw them both in the same place
            eccentricity = 0
        if i==0:  #first cue/precue goes to the left stream, second to the right
            cues[i].setPos( [-eccentricity, 0] )
            preCues[i].setPos( [-eccentricity, 0] )
        else:
            cues[i].setPos( [eccentricity, 0] )
            preCues[i].setPos( [eccentricity, 0] )
    for i in range(0,len(wordList)): #draw all the words. Later, the seq will indicate which one to present on each frame. The seq might be shorter than the wordList
        word = wordList[ i ]
        #flipHoriz, flipVert textStim http://www.psychopy.org/api/visual/textstim.html
        #Create one bucket of words for the left stream
        textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        #Create a bucket of words for the right stream
        textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-thisTrial['wordEccentricity'],0]) #left
        textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
        textStimulusStream2.setPos([thisTrial['wordEccentricity'],0]) #right
        textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli that comprise stream 2
    #Use these buckets by pulling out the drawn words in the order you want them. For now, just create the order you want.
    idxsStream1, idxsStream2 = calcSequenceForThisTrial()
    return idxsStream1, idxsStream2, cues, preCues
#create click sound for keyboard
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferiro click manually
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
    fixSizePix = 32 #2.6  #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
# NOTE(review): fixSizePix/4 relies on Python 2 integer division (file uses xrange elsewhere);
# under Python 3, np.random.rand would receive floats here.
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 )   *2.0-1 #Can counterphase flicker  noise texture to create salient flicker if you break fixation
#Construct the fixation point.
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,-1,-1),size=4,units='pix',autoLog=autoLogging)  #small red dot
#Construct the holders for the experiment text that will appear on screen
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
requireAcceptance = True
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
#clickSound, badKeySound = stringResponse.setupSoundsForResponse()
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS, This implements the full factorial design!
cueSerialPositions = np.array([7,9,11,13,15])  #serial positions in the stream at which the cue may appear
for cueSerialPos in cueSerialPositions:
    for rightResponseFirst in [False,True]:
        for wordEcc in [0.8,6]:  #near vs far eccentricity of the streams, in deg
            stimList.append( {'cueSerialPos':cueSerialPos, 'rightResponseFirst':rightResponseFirst,
                              'leftStreamFlip':False, 'rightStreamFlip':False,
                              'wordEccentricity':wordEcc } )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method. Duplicate the list of conditions trialsPerCondition times to create the full experiment
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cueSerialPositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
               ' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
    """Map 0-25 onto 'A'-'Z'; any value outside that range (or of a type that
    chr/ord cannot handle) yields '@'."""
    if 0 <= number <= 25:
        try:
            return chr(ord('A') + number)
        except Exception:  #e.g. a non-integer slipped through the range check
            return '@'
    return '@'
def letterToNumber(letter): #A = 0, Z = 25
    """Map an uppercase letter 'A'-'Z' onto 0-25; anything else yields -999.

    A broad except keeps the -999 sentinel for inputs that cannot even be
    compared/ord'ed (e.g. non-strings or multi-character strings).
    """
    try:
        if 'A' <= letter <= 'Z':
            return ord(letter) - ord('A')
        return (-999)
    except Exception:
        return (-999)
def wordToIdx(word,wordList, responseMustBeInWordList):
    """Return the index of the first wordList entry whose uppercased form equals
    `word`, or None when there is no match.

    A lookup failure (e.g. a non-string in wordList) also returns None, with a
    diagnostic print when responseMustBeInWordList is set.
    """
    try:
        for position, candidate in enumerate(wordList):
            if candidate.upper() == word:
                return position
        return None
    except:
        if responseMustBeInWordList:
            print('Unexpected error in wordToIdx with word=',word)
        return (None)
#print header for data file
# One tab-separated header row; per-response columns are repeated numRespsWanted times.
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
    numRespsWanted = 2  #one response per stream
    dataFile.write('rightResponseFirst\t')
    for i in range(numRespsWanted):
        dataFile.write('cueSerialPos'+str(i)+'\t')   #have to use write to avoid ' ' between successive text, at least until Python 3
        dataFile.write('answer'+str(i)+'\t')
        dataFile.write('response'+str(i)+'\t')
        dataFile.write('correct'+str(i)+'\t')
        dataFile.write('responsePosRelative'+str(i)+'\t')
print('seq1\tseq2\t',end='', file=dataFile) #assuming 2 streams
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,cues,cuesSerialPos,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
        noise,proportnNoise,allFieldCoords,numNoiseDots ):
    #defining a function to draw each frame of stim.
    #seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
    # n is the current frame number within the trial; returns True.
    # Relies on globals: bgColor, cueColor, letterColor.
    SOAframes = letterDurFrames+ISIframes
    cueFrames = cuesSerialPos*SOAframes  #frame number(s) at which each cue turns on
    stimN = int( np.floor(n/SOAframes) )  #serial position of the letter being shown this frame
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI.  it's still time to draw the letter
    thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
    #print ('stimN=',stimN, 'thisStimIdx=', thisStimIdx, ' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) )  #DEBUGOFF
    if seq2 is not None:
        thisStim2Idx = seq2[stimN]
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    for cue in cues:
        cue.setLineColor( bgColor )  #default to invisible; turned on below if it's cue time
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for i in xrange( len(cueFrames) ): #check whether it's time for any cue. Assume first cueFrame is for first cue, etc.
        thisCueFrame = cueFrames[i]
        if n>=thisCueFrame and n<thisCueFrame+cueDurFrames:
            cues[i].setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
        textStimuliStream2[thisStim2Idx].setColor( letterColor )
    else:  #ISI portion of the SOA: draw in background color (invisible) to keep per-frame workload constant
        textStimuliStream1[thisStimIdx].setColor( bgColor )
        textStimuliStream2[thisStim2Idx].setColor( bgColor )
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    textStimuliStream2[thisStim2Idx].draw()
    for cue in cues:
        cue.draw() #will be drawn in backgruond color if it's not time for that
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0:  #re-randomize the noise dots at the onset of each letter
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0:
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
# Build one cue circle and one precue circle per stream (left and right).
cues = list()
preCues = list()
for i in xrange(2):
    cue = visual.Circle(myWin,
                 radius=cueRadius,#Martini used circles with diameter of 12 deg
                 lineColorSpace = 'rgb',
                 lineColor=bgColor,  #starts invisible; oneFrameOfStim switches it to cueColor at cue time
                 lineWidth=6.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
                 units = 'deg',
                 fillColorSpace = 'rgb',
                 fillColor=None, #beware, with convex shapes fill colors don't work
                 pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
                 interpolate=True,
                 autoLog=False)#this stim changes too much for autologging to be useful
    cues.append(cue)
    #Precue to potentially inform the participant where the letter streams will appear
    preCue = visual.Circle(myWin,
                 radius=2,#Martini used circles with diameter of 12 deg
                 lineColorSpace = 'rgb',
                 lineColor=bgColor,
                 lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
                 units = 'deg',
                 fillColorSpace = 'rgb',
                 fillColor='white', #beware, with convex shapes fill colors don't work
                 pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
                 interpolate=True,
                 autoLog=False)#this stim changes too much for autologging to be useful
    preCues.append(preCue)
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Check the frame times of the trial just run and log any skipped frames.

    ts: list of clock times recorded after each frame flip.
    trialN: trial number, included in the log messages.
    Returns the number of inter-frame intervals exceeding the tolerance.
    Relies on globals: refreshRate, demo.
    """
    #check for timing problems and log them
    #ts is a list of the times of the clock after each frame
    interframeIntervs = np.diff(ts)*1000
    #print '   interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong >0 and (not demo):
        longFramesStr =  'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
        if demo:
            longFramesStr += 'not printing them all because in demo mode'
        else:
            longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
                    str( np.around( interframeIntervs[idxsInterframeLong] ,1  ) )+ ' and was these frames: '+ str(idxsInterframeLong)
        if longFramesStr is not None:  #was "!= None"; identity test is the idiomatic form
            logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
            if not demo:
                flankingAlso=list()
                for idx in idxsInterframeLong: #also print timing of one before and one after long frame
                    if idx-1>=0:
                        flankingAlso.append(idx-1)
                    else: flankingAlso.append(np.nan)  #np.NaN alias was removed in NumPy 2.0
                    flankingAlso.append(idx)
                    if idx+1<len(interframeIntervs):  flankingAlso.append(idx+1)
                    else: flankingAlso.append(np.nan)
                flankingAlso = np.array(flankingAlso)
                # BUGFIX: np.negative is arithmetic negation, not boolean NOT; on modern NumPy it
                # raises TypeError for boolean arrays. np.logical_not keeps the non-NaN entries.
                flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
                flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts
                logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) )  ) #because this is not an essential error message, as previous one already indicates error
                #As INFO, at least it won't fill up the console when console set to WARNING or higher
    return numCasesInterframeLong
    #end timing check
trialClock = core.Clock()
# Running tallies of performance, updated each trial and summarised at the end.
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted )  #per-response-position tallies
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
def do_RSVP_stim(thisTrial, cues, preCues, seq1, seq2, proportnNoise,trialN):
    """Run the stimulus phase of one trial: fixation interval, then the two RSVP
    streams frame by frame, recording frame times.

    Returns (cuesSerialPos, correctAnswerIdxsStream1, correctAnswerIdxsStream2, ts)
    where ts is the list of per-frame clock times for the timing check.
    """
    #relies on global variables:
    #   textStimuli, logging, bgColor
    #  thisTrial should have 'cueSerialPos'
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesSerialPos = [] #will contain the serial positions in the stream of all the cues (corresponding to the targets)
    cuesSerialPos.append(thisTrial['cueSerialPos']) #stream1
    cuesSerialPos.append(thisTrial['cueSerialPos']) #stream2
    cuesSerialPos = np.array(cuesSerialPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #gtenerating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    for cue in cues:
        cue.setLineColor(bgColor)
        preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int(   (np.random.rand(1)/2.+fixatnPeriodMin)   *refreshRate)  #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20):  #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        #      fixation.draw()
        #else: fixationBlank.draw()
        for preCue in preCues:
            preCue.draw()
        fixationPoint.draw()
        myWin.flip() #end fixation interval
    #myWin.setRecordFrameIntervals(True);  #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cues,cuesSerialPos,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
                                         noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        if thisTrial['wordEccentricity'] > 2: #kludge to avoid drawing fixation in super-near condition for Cheryl
            fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0;  ts.append(t);  #record the time of this frame for the later timing check
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    if task=='T1':
        respPromptStim.setText('What was circled?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    #print('cuesSerialPos=',cuesSerialPos, 'cuesSerialPos.dtype =',cuesSerialPos.dtype, 'type(seq1)=',type(seq1))
    seq1 = np.array(seq1) #convert seq1 list to array so that can index it with multiple indices (cuesSerialPos)
    #print('seq1[cuesSerialPos]=', seq1[cuesSerialPos])
    seq2= np.array(seq2) #convert seq2 list to array so that can index it with multiple indices (cuesSerialPos)
    correctAnswerIdxsStream1 = np.array( seq1[cuesSerialPos] )
    correctAnswerIdxsStream2 = np.array( seq2[cuesSerialPos] )
    #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1)#, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
    return cuesSerialPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cueSerialPos,correctAnswerIdx):
    """Score one response against the cued item and write the per-response
    columns to dataFile.

    Returns (correct, approxCorrect, responsePosRelative):
      correct            1 if the response matches the cued item exactly, else 0
      approxCorrect      True if the response occurred within +/-3 serial positions of the cue
      responsePosRelative response's serial position minus cueSerialPos (-999 if response not in stream)
    Relies on globals: autopilot, wordList, dataFile.
    """
    #Handle response, calculate whether correct, ########################################
    #responses are actual characters
    #correctAnswer is index into stimSequence
    #autopilot is global variable
    if autopilot or passThisTrial:
        response = responseAutopilot
    #print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\nstimSequence=',stimSequence, '\nwords=',wordList)
    correct = 0
    approxCorrect = 0
    posOfResponse = -999
    responsePosRelative = -999
    idx = correctAnswerIdx
    print('correctAnswerIdx = ',correctAnswerIdx)
    correctAnswer = wordList[idx].upper()
    responseString=response
    responseString= responseString.upper()
    #print('correctAnswer=',correctAnswer ,' responseString=',responseString)
    if correctAnswer == responseString:
        correct = 1
    #print('correct=',correct)
    responseMustBeInWordList = True
    if len(stimSequence) != len(wordList):  #stream shows only a subset of the wordList, so the response may legitimately be absent
        responseMustBeInWordList = False
    #stimSeqAsLetters = list()
    #for letter in stimSequence:
    #    stimSeqAsLetters.append( chr( ord('A') + letter ) )
    #letterIdxOfAlphabet = ord( responseString.upper() ) - ord( 'A')
    #print("Sending to responseWordIdx stimSequence=",stimSequence," responseString=",responseString, "stimSeqAsLetters=",stimSeqAsLetters, "responseMustBeInWordList=",responseMustBeInWordList)
    responseWordIdx = wordToIdx(responseString.upper(),wordList, responseMustBeInWordList)
    print('responseWordIdx = ', responseWordIdx, ' stimSequence=', stimSequence)
    if responseWordIdx is None: #response is not in the wordList
        posOfResponse = -999
        logging.warn('Response was not present in the stimulus stream')
    else:
        posOfResponse= np.where( np.array(stimSequence)==responseWordIdx ) #Assumes that the response was in the stimulus sequence
        print("posOfResponse=",posOfResponse, "responseWordIdx=",responseWordIdx,"stimSequence=",stimSequence, "type(stimSequence)=",type(stimSequence))
        posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence
        if len(posOfResponse) > 1:
            logging.error('Expected response to have occurred in only one position in stream')
        elif len(posOfResponse) == 0:
            logging.error('Expected response to have occurred somewhere in the stream')
            raise ValueError('Expected response to have occurred somewhere in the stream')
        else:
            posOfResponse = posOfResponse[0] #first element of list (should be only one element long
        responsePosRelative = posOfResponse - cueSerialPos
        approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus/minus
    #print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\nposOfResponse = ',posOfResponse) #debugON
    #print response stuff to dataFile
    print('correctAnswer=',correctAnswer,' correct=',correct, 'responsePosRelative=',responsePosRelative)
    #header was answerPos0, answer0, response0, correct0, responsePosRelative0
    print(cueSerialPos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
    return correct,approxCorrect,responsePosRelative
    #end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Audio feedback: a high tone for a correct response, a low tone for an
    incorrect one, and an alternating mid/low pattern when the trial was passed."""
    toneHigh = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
    toneLow = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
    toneHigh.setVolume(0.9)
    toneLow.setVolume(1.0)
    if correct:
        toneHigh.play()
    elif passThisTrial:
        tonePass = sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            tonePass.play()
            toneLow.play()
    else: #incorrect
        toneLow.play()
def instructions():
    """Show the pre-experiment instruction screens in order.

    Each screen draws its text stimuli, flips the window, then blocks in
    event.waitKeys() -- so ANY key advances, despite the 'Press Space'
    caption.  Uses module-level myWin, fixationPoint, autoLogging.
    """
    instrcolor = 'white'  # NOTE(review): unused; colors are hard-coded per stim below
    # NOTE(review): color=(1,1,1) is white but (0,0,0) is mid-gray in PsychoPy's
    # rgb colorSpace -- confirm the gray heading/caption color is intentional.
    preInstructions = visual.TextStim(myWin, text = "Press a key to see the instructions",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions1 = visual.TextStim(myWin, text = "Instructions",pos=(0, .8),colorSpace='rgb',color=(0,0,0),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions2 = visual.TextStim(myWin, text = "Please rest your eyes on the red dot at all times",pos=(0, -.2),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions3 = visual.TextStim(myWin, text = "Press Space to Continue",pos=(0, -.9), colorSpace='rgb',color=(0,0,0),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions4b = visual.TextStim(myWin, text = "On each trial, two letter streams will be presented with each letter flashing for a fraction of a second.",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions5b = visual.TextStim(myWin, text = "Two letters will be targeted with white circle on each trial. Try to remember these letters.",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions6 = visual.TextStim(myWin, text = "After the letter streams, you will need to select the letters you just saw by clicking the letter on the screen. \nSome of the trials will require you to choose the left letter first \nOthers will require you to choose the right one first.", pos=(0,0), colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions7 = visual.TextStim(myWin, text = "Press a key to begin the experiment",pos=(0, 0), colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions9 = visual.TextStim(myWin, text = "If you have any questions, ask the experimentor now.",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    Instructions10 = visual.TextStim(myWin, text = "If you don't know the letter, you can guess.",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )
    # Screen 0: entry prompt
    preInstructions.draw()
    myWin.flip()
    event.waitKeys()
    # Screen 1: fixation instruction, with the fixation point visible
    Instructions1.draw()
    Instructions2.draw()
    Instructions3.draw()
    fixationPoint.draw()
    myWin.flip()
    event.waitKeys()
    # Screen 2: describe the two letter streams
    Instructions1.draw()
    Instructions4b.draw()
    Instructions3.draw()
    myWin.flip()
    event.waitKeys()
    # Screen 3: describe the cued (circled) targets
    Instructions1.draw()
    Instructions5b.draw()
    Instructions3.draw()
    myWin.flip()
    event.waitKeys()
    # Screen 4: describe the response procedure
    Instructions1.draw()
    Instructions6.draw()
    Instructions3.draw()
    myWin.flip()
    event.waitKeys()
    # Screen 5: guessing is allowed
    Instructions1.draw()
    Instructions3.draw()
    Instructions10.draw()
    myWin.flip()
    event.waitKeys()
    # Screen 6: last chance for questions
    Instructions1.draw()
    Instructions9.draw()
    Instructions3.draw()
    myWin.flip()
    event.waitKeys()
    # Final screen: begin the experiment
    Instructions7.draw()
    myWin.flip()
    event.waitKeys()
# ---- Choose between the staircase (threshold-finding) phase and the main experiment ----
changeToUpper = False #Chery's experiment
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
    # create the staircase handler (QUEST adaptive procedure, or a conventional up/down staircase)
    useQuest = True
    if useQuest:
        staircase = data.QuestHandler(startVal = 95,
                      startValSd = 80,
                      stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
                      nTrials = staircaseTrials,
                      #extraInfo = thisInfo,
                      pThreshold = threshCriterion, #0.25,
                      gamma = 1./26,  # guess rate: 26 possible letters
                      delta=0.02, #lapse rate, I suppose for Weibull function fit
                      method = 'quantile', #uses the median of the posterior as the final answer
                      stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
                      minVal=1, maxVal = 100
                      )
        print('created QUEST staircase')
    else:
        stepSizesLinear = [.2,.2,.1,.1,.05,.05]
        stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
        staircase = data.StairHandler(startVal = 0.1,
                      stepType = 'log', #if log, what do I want to multiply it by
                      stepSizes = stepSizesLog, #step size to use after each reversal
                      minVal=0, maxVal=1,
                      nUp=1, nDown=3, #will home in on the 80% threshold
                      nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
                      nTrials=1)
        print('created conventional staircase')
    if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
        prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
    prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
    phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
    print(phasesMsg); logging.info(phasesMsg)
    # staircaseStarterNoise PHASE OF EXPERIMENT: easy fixed-noise trials before the adaptive part
    corrEachTrial = list() #only needed for easyStaircaseStarterNoise
    staircaseTrialN = -1; mainStaircaseGoing = False
    while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
        if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
            staircaseTrialN += 1
            noisePercent = prefaceStaircaseNoise[staircaseTrialN]
        else:
            if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
                mainStaircaseGoing = True
                print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
                staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
            try: #advance the staircase
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
                noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
                staircaseTrialN += 1
            except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
                print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
                break #break out of the trials loop
        #print('staircaseTrialN=',staircaseTrialN)
        idxsStream1, idxsStream2, cues, preCues = calcAndPredrawStimuli(wordList,cues,preCues, staircaseTrials)
        # NOTE(review): thisTrial is not assigned anywhere in this staircase branch --
        # presumably defined earlier in the file; confirm before running with doStaircase=True.
        cuesSerialPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(thisTrial, cues, preCues, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
        numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        expStop,passThisTrial,responses,buttons,responsesAutopilot = \
            letterLineupResponse.doLineup(myWin,bgColor,myMouse,clickSound,badKeySound,possibleResps,showBothSides,sideFirstLeftRightCentral,autopilot) #CAN'T YET HANDLE MORE THAN 2 LINEUPS
        if not expStop:
            if mainStaircaseGoing:
                print('staircase\t', end='', file=dataFile)
            else:
                print('staircase_preface\t', end='', file=dataFile)
            #header start 'trialnum\tsubject\ttask\t'
            print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
            print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            # NOTE(review): responseAutopilot (singular), sequenceLeft and correctAnswerIdx are
            # not defined in this branch (the lineup returned responsesAutopilot, plural) --
            # this looks like a NameError waiting to happen; verify against the main loop's call.
            correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
                passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesSerialPos[0],correctAnswerIdx )
            #header then had seq1, seq2
            print(idxsStream1,'\t',idxsStream2,'\t', end='', file=dataFile) #print the indexes into the wordList
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            core.wait(.06)
            if feedback:
                play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            # NOTE(review): T1approxCorrect is never assigned in this branch -- presumably
            # meant to be approxCorrect from handleAndScoreResponse; confirm.
            print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
            corrEachTrial.append(T1approxCorrect)
            if mainStaircaseGoing:
                staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
                #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
    #ENDING STAIRCASE PHASE
    if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
        print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
        # NOTE(review): slice ends at staircaseTrialN here but staircaseTrialN+1 in the print
        # above -- one of the two is off by one; confirm which bound importData should use.
        staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
    print('framesSaved after staircase=',framesSaved) #debugON
    timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
    msg = ('prefaceStaircase phase' if expStop else '')
    msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
    logging.info(msg); print(msg)
    printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
    #print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
    threshNoise = round(staircase.quantile(),3)
    if descendingPsycho:
        threshNoise = 100- threshNoise
    threshNoise = max( 0, threshNoise ) #e.g. ff get all trials wrong, posterior peaks at a very negative number
    msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
    logging.info(msg); print(msg)
    myWin.close()
    # Fit and plot the psychometric data collected by the staircase
    fit = None
    try:
        intensityForCurveFitting = staircase.intensities
        if descendingPsycho:
            intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
        fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
    except:
        print("Fit failed.")
    plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
    #save figure to file
    pylab.savefig(fileName+'.pdf')
    print('The plot has been saved, as '+fileName+'.pdf')
    pylab.show() #must call this to actually show plot
else: #not staircase
    noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
    print(phasesMsg); logging.info(phasesMsg)
    nDoneMain =0
    while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
        if nDoneMain==0:
            msg='Starting main (non-staircase) part of experiment'
            logging.info(msg); print(msg)
            instructions()
        thisTrial = trials.next() #get a proper (non-staircase) trial
        sequenceStream1, sequenceStream2, cues, preCues = calcAndPredrawStimuli(wordList,cues,preCues, thisTrial)
        print('sequenceStream1=',sequenceStream1)
        print('sequenceStream2=',sequenceStream2)
        myWin.setMouseVisible(False)
        cuesSerialPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(thisTrial, cues, preCues, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
        print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1,'correctAnswerIdxsStream2=',correctAnswerIdxsStream2)
        numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
        # collect one lineup response per requested report
        myMouse = event.Mouse()
        alphabet = list(string.ascii_lowercase)
        possibleResps = alphabet #possibleResps.remove('C'); possibleResps.remove('V')
        expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
        dL = [None]*numRespsWanted #dummy list for null values
        expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
        responseOrder = range(numRespsWanted)
        showBothSides=True
        sideFirstLeftRightCentral = thisTrial['rightResponseFirst']
        #if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
        #responseOrder.reverse() #this is necessary if using text input rather than lineup response
        expStop,passThisTrial,responses,buttons,responsesAutopilot = \
            letterLineupResponse.doLineup(myWin,bgColor,myMouse,clickSound,badKeySound,possibleResps,showBothSides,sideFirstLeftRightCentral,autopilot) #CAN'T YET HANDLE MORE THAN 2 LINEUPS
        expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
        if not expStop:
            print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
            print(nDoneMain,'\t', end='', file=dataFile)
            print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
            print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
            i = 0
            eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
            for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
                # pick which stream this (i-th reported) response should be scored against
                if thisTrial['rightResponseFirst']:
                    if i==0:
                        sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
                    else: sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
                else:
                    if i==0:
                        sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
                    else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
                correct,approxCorrect,responsePosRelative = (
                    handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot,task,sequenceStream,thisTrial['cueSerialPos'],correctAnswerIdxs[i] ) )
                eachCorrect[i] = correct
                eachApproxCorrect[i] = approxCorrect
            #header then had seq1, seq2. Save them
            print(sequenceStream1,'\t',sequenceStream2,'\t', end='', file=dataFile) #print the indexes into the wordList
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
            numTrialsCorrect += eachCorrect.all() #so count -1 as 0
            numTrialsApproxCorrect += eachApproxCorrect.all()
            numTrialsEachCorrect += eachCorrect #list numRespsWanted long
            numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
            # feedback tone plays only when BOTH responses were correct
            if(sum(eachCorrect)==2):
                allCorrect=True
            else:
                allCorrect=False
            if exportImages: #catches one frame of response
                myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
                framesSaved +=1; core.wait(.1)
                myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
                expStop=True
            core.wait(.1)
            if feedback: play_high_tone_correct_low_incorrect(allCorrect, passThisTrial=False)
            nDoneMain+=1
            dataFile.flush(); logging.flush()
            print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
            # offer a rest break every pctCompletedBreak percent of trials
            if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                ( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
                nextText.setText('Press "SPACE" to continue!')
                nextText.draw()
                progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
                NextRemindCountText.setText(progressMsg)
                NextRemindCountText.draw()
                myWin.flip() # myWin.flip(clearBuffer=True)
                waiting=True
                while waiting:
                    if autopilot: break
                    elif expStop == True:break
                    for key in event.getKeys(): #check if pressed abort-type key
                        if key in ['space','ESCAPE']:
                            waiting=False
                        if key in ['ESCAPE']:
                            expStop = True
                myWin.clearBuffer()
            core.wait(.2); time.sleep(.2)
    #end main trials loop
#end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
print(msg); logging.info(msg)
msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
print(msg); logging.info(msg)
for i in range(numRespsWanted):
msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
print(msg); logging.info(msg)
msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
if quitFinder:
applescript="\'tell application \"Finder\" to launch\'" #turn Finder back on
shellCmd = 'osascript -e '+applescript
os.system(shellCmd) | {
"repo_name": "alexholcombe/twoWords",
"path": "twoWordsCherylwithIntro.py",
"copies": "1",
"size": "64243",
"license": "mit",
"hash": 5398931645272247000,
"line_mean": 57.4861111111,
"line_max": 419,
"alpha_frac": 0.6738633003,
"autogenerated": false,
"ratio": 3.602882620155908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4776745920455908,
"avg_score": null,
"num_lines": null
} |
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
import os.path
import sys
class shlex:
    "A lexical analyzer class for simple shell-like syntaxes."
    def __init__(self, instream=None, infile=None):
        # Read from `instream` if supplied, otherwise from stdin.
        if instream:
            self.instream = instream
            self.infile = infile
        else:
            self.instream = sys.stdin
            self.infile = None
        self.commenters = '#'        # characters that begin a comment running to end-of-line
        # NOTE(review): 'abcdfeghi...' transposes e and f; harmless for membership tests.
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        self.whitespace = ' \t\r\n'  # token-separating characters
        self.quotes = '\'"'          # recognized quote characters
        self.state = ' '             # lexer state: ' ' whitespace, 'a' word, a quote char, or None at EOF
        self.pushback = [];          # tokens pushed back via push_token()
        self.lineno = 1              # current input line number, for error_leader()
        self.debug = 0               # debug verbosity, 0..3
        self.token = ''              # token currently being accumulated
        self.filestack = []          # (infile, instream, lineno) frames for source inclusion
        self.source = None           # inclusion keyword (e.g. 'source'), or None to disable
        if self.debug:
            print 'shlex: reading from %s, line %d' \
                % (self.instream, self.lineno)

    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print "shlex: pushing token " + `tok`
        self.pushback = [tok] + self.pushback;

    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            # Serve pushed-back tokens first, from the front of the list.
            tok = self.pushback[0]
            self.pushback = self.pushback[1:]
            if self.debug >= 1:
                print "shlex: popping token " + `tok`
            return tok
        # No pushback.  Get a token.
        raw = self.read_token()
        # Handle inclusions: the token after the source keyword names a file to switch to.
        while raw == self.source:
            (newfile, newstream) = self.sourcehook(self.read_token())
            self.filestack.insert(0, (self.infile, self.instream, self.lineno))
            self.infile = newfile
            self.instream = newstream
            self.lineno = 1
            if self.debug:
                print 'shlex: pushing to file %s' % (self.infile,)
            raw = self.get_token()
        # Maybe we got EOF instead?  If so, pop back to the including file, if any.
        while raw == "":
            if len(self.filestack) == 0:
                return ""
            else:
                self.instream.close()
                (self.infile, self.instream, self.lineno) = self.filestack[0]
                self.filestack = self.filestack[1:]
                if self.debug:
                    print 'shlex: popping to %s, line %d' \
                        % (self.instream, self.lineno)
                self.state = ' '
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw:
                print "shlex: token=" + `raw`
            else:
                print "shlex: token=EOF"
        return raw

    def read_token(self):
        "Read a token from the input stream (no pushback or inclusions)"
        tok = ''    # NOTE(review): unused local; self.token is the real accumulator
        while 1:
            nextchar = self.instream.read(1);
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state", repr(self.state), \
                    "I see character:", repr(nextchar)
            if self.state is None:
                self.token = '';        # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None;  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token:
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    # discard the remainder of the comment line
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    self.token = nextchar
                    self.state = nextchar   # state is the quote char until it closes
                else:
                    # any other character is a one-character punctuation token
                    self.token = nextchar
                    if self.token:
                        break   # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                # NOTE(review): EOF inside a quoted string loops forever here -- read(1)
                # returns '' which never matches the closing quote (later versions raise).
                self.token = self.token + nextchar
                if nextchar == self.state:
                    self.state = ' '
                    break
            elif self.state == 'a':
                if not nextchar:
                    self.state = None;  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars or nextchar in self.quotes:
                    self.token = self.token + nextchar
                else:
                    # punctuation ends the word; push the char back for the next call
                    self.pushback = [nextchar] + self.pushback
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + `result`
            else:
                print "shlex: raw token=EOF"
        return result

    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        if newfile[0] == '"':
            newfile = newfile[1:-1]   # strip surrounding double quotes
        # This implements cpp-like semantics for relative-path inclusion.
        if type(self.infile) == type("") and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))

    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if not infile:
            infile = self.infile
        if not lineno:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)
if __name__ == '__main__':
    # Demo driver: lex stdin, or the file named on the command line, printing each token.
    if len(sys.argv) == 1:
        lexer = shlex()
    else:
        file = sys.argv[1]
        lexer = shlex(open(file), file)
    while 1:
        tt = lexer.get_token()
        if tt:
            print "Token: " + repr(tt)
        else:
            break   # empty string means EOF
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/shlex.py",
"copies": "4",
"size": "7034",
"license": "mit",
"hash": -7187342638287929000,
"line_mean": 36.0210526316,
"line_max": 79,
"alpha_frac": 0.4746943418,
"autogenerated": false,
"ratio": 4.546864899806076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017779724411056456,
"num_lines": 190
} |
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
import os.path
import sys
__all__ = ["shlex"]
class shlex:
    "A lexical analyzer class for simple shell-like syntaxes."
    def __init__(self, instream=None, infile=None):
        # Read from `instream` if supplied, otherwise from stdin.
        if instream is not None:
            self.instream = instream
            self.infile = infile
        else:
            self.instream = sys.stdin
            self.infile = None
        self.commenters = '#'        # characters that begin a comment running to end-of-line
        # NOTE(review): 'abcdfeghi...' transposes e and f; harmless for membership tests.
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        self.whitespace = ' \t\r\n'  # token-separating characters
        self.quotes = '\'"'          # recognized quote characters
        self.state = ' '             # lexer state: ' ' whitespace, 'a' word, a quote char, or None at EOF
        self.pushback = []           # tokens pushed back via push_token()
        self.lineno = 1              # current input line number, for error_leader()
        self.debug = 0               # debug verbosity, 0..3
        self.token = ''              # token currently being accumulated
        self.backslash = False       # in quotes state: was the previous char an unconsumed backslash?
        self.filestack = []          # (infile, instream, lineno) frames for push_source/pop_source
        self.source = None           # inclusion keyword (e.g. 'source'), or None to disable
        if self.debug:
            print 'shlex: reading from %s, line %d' \
                % (self.instream, self.lineno)

    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print "shlex: pushing token " + `tok`
        self.pushback = [tok] + self.pushback

    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        self.filestack.insert(0, (self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile is not None:
                print 'shlex: pushing to file %s' % (self.infile,)
            else:
                print 'shlex: pushing to stream %s' % (self.instream,)

    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack[0]
        self.filestack = self.filestack[1:]
        if self.debug:
            print 'shlex: popping to %s, line %d' \
                % (self.instream, self.lineno)
        self.state = ' '    # reset so a token can't straddle the source switch

    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            # Serve pushed-back tokens first, from the front of the list.
            tok = self.pushback[0]
            self.pushback = self.pushback[1:]
            if self.debug >= 1:
                print "shlex: popping token " + `tok`
            return tok
        # No pushback.  Get a token.
        raw = self.read_token()
        # Handle inclusions: the token after the source keyword names a file to switch to.
        while raw == self.source:
            spec = self.sourcehook(self.read_token())
            if spec:
                (newfile, newstream) = spec
                self.push_source(newstream, newfile)
            raw = self.get_token()
        # Maybe we got EOF instead?  If so, pop back to the including source, if any.
        while raw == "":
            if len(self.filestack) == 0:
                return ""
            else:
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw:
                print "shlex: token=" + `raw`
            else:
                print "shlex: token=EOF"
        return raw

    def read_token(self):
        "Read a token from the input stream (no pushback or inclusions)"
        while 1:
            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state", repr(self.state), \
                    "I see character:", repr(nextchar)
            if self.state is None:
                self.token = ''     # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None   # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token:
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    # discard the remainder of the comment line
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    self.token = nextchar
                    self.state = nextchar   # state is the quote char until it closes
                else:
                    # any other character is a one-character punctuation token
                    self.token = nextchar
                    if self.token:
                        break   # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                self.token = self.token + nextchar
                if nextchar == '\\':
                    # a backslash escapes the next char; two in a row cancel out
                    if self.backslash:
                        self.backslash = False
                    else:
                        self.backslash = True
                else:
                    if not self.backslash and nextchar == self.state:
                        # unescaped matching quote closes the token
                        self.state = ' '
                        break
                    elif self.backslash:
                        self.backslash = False
                    elif not nextchar:  # end of file
                        if self.debug >= 2:
                            print "shlex: I see EOF in quotes state"
                        # XXX what error should be raised here?
                        raise ValueError, "No closing quotation"
            elif self.state == 'a':
                if not nextchar:
                    self.state = None   # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars or nextchar in self.quotes:
                    self.token = self.token + nextchar
                else:
                    # punctuation ends the word; push the char back for the next call
                    self.pushback = [nextchar] + self.pushback
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + `result`
            else:
                print "shlex: raw token=EOF"
        return result

    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        if newfile[0] == '"':
            newfile = newfile[1:-1]   # strip surrounding double quotes
        # This implements cpp-like semantics for relative-path inclusion.
        if type(self.infile) == type("") and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))

    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if infile is None:
            infile = self.infile
        if lineno is None:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)
if __name__ == '__main__':
    # Demo driver: lex stdin, or the file named on the command line, printing each token.
    if len(sys.argv) == 1:
        lexer = shlex()
    else:
        file = sys.argv[1]
        lexer = shlex(open(file), file)
    while 1:
        tt = lexer.get_token()
        if tt:
            print "Token: " + repr(tt)
        else:
            break   # empty string means EOF
| {
"repo_name": "jrabbit/ubotu-fr",
"path": "src/shlex.py",
"copies": "14",
"size": "8159",
"license": "bsd-3-clause",
"hash": 3730988352145723400,
"line_mean": 36.4266055046,
"line_max": 76,
"alpha_frac": 0.4799607795,
"autogenerated": false,
"ratio": 4.5888638920134985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Alex Kim - Minesweeper
import atexit as _atexit
import random as _random
import sys as _sys
def render_board(game_board):
    """Print the board as an ASCII grid with column headers, row numbers and borders.

    Example (7x7):
        0 1 2 3 4 5 6
       +-------------+
      0|O O O O O O O|
      ...
       +-------------+
    """
    # get the dimensions (all rows have the same width)
    width = len(game_board[0])
    # add some white space before the grid
    print ""
    header_row = [str(i) for i in range(width)]
    print " " + " ".join(header_row)
    # top/bottom border: corner, one dash per tile plus one per gap, corner
    boarder_row = [" +"] + (["-"] * (width * 2 - 1)) + ["+"]
    print "".join(boarder_row)
    for rn, row in enumerate(game_board):
        # join the row's rendered tiles with its row number and side borders
        row_value = str(rn) + "|" + " ".join([render_tile(tile) for tile in row]) + "|"
        print row_value
    print "".join(boarder_row)
    print ""
def create_game_board(height, width, mines=None):
    """
    Create a gameboard of a specified size, with a specified number of mines
    :param height: number of rows -- 1 indexed
    :param width: number of columns -- 1 indexed
    :param mines: Optional number of mines to lay on the board. Default is 10%
    :return: list of rows, each a list of [Visible (bool), Mine (bool), Adjacent Mines (int)] tiles
    """
    # how many tiles the board needs
    num_tiles = height * width
    if not mines:
        mines = num_tiles / 10  # Python 2 integer division: 10% rounded down
    else:
        assert mines < num_tiles
    # each tile list has the meaning: [Visible (bool), Mine (bool), Adjacent Mines (int)]
    # NOTE: list multiplication ([[..]] * n) would alias ONE inner list n times;
    # comprehensions build independent tile lists instead.
    tiles = [[False, True, 0] for _ in range(mines)] + [[False, False, 0] for _ in range(num_tiles - mines)]
    # shuffle in place so pop() below deals mines and blanks in random order
    _random.shuffle(tiles)
    # initialize the board as an empty list
    game_board = list()
    for i in range(height):
        # pop() deals `width` tiles off the shuffled deck for this row
        game_board.append([tiles.pop() for _ in range(width)])
    # update the board with the right number of neighboring mines per tile
    for row_num in range(height):
        for col_num in range(width):
            neighboring_mines = len(get_neighbors(game_board, row_num, col_num, is_mine=True))
            game_board[row_num][col_num][2] = neighboring_mines
    return game_board
def get_neighbors(game_board, row_idx, col_idx, is_mine=None, is_visible=None):
    """
    Collect the tiles adjacent to (row_idx, col_idx), clamped to the board edges.

    :param game_board: the game board (rows of [visible, mine, adjacent-mines] tiles)
    :param row_idx: row index of the tile whose neighbors we want
    :param col_idx: column index of the tile whose neighbors we want
    :param is_mine: if given, keep only neighbors whose mine flag equals this
    :param is_visible: if given, keep only neighbors whose visible flag equals this
    :return: list of [(row, col), tile] entries, in row-major order
    """
    last_row = len(game_board) - 1
    last_col = len(game_board[0]) - 1
    matches = list()
    # Walk the 3x3 window around the tile, clamped so it never leaves the board.
    # (Clamping with max/min sidesteps the negative-slice pitfall entirely.)
    for r in range(max(row_idx - 1, 0), min(row_idx + 1, last_row) + 1):
        for c in range(max(col_idx - 1, 0), min(col_idx + 1, last_col) + 1):
            if r == row_idx and c == col_idx:
                continue  # skip the central tile itself
            tile = game_board[r][c]
            if is_visible is not None and tile[0] != is_visible:
                continue  # a visibility filter was requested and this tile fails it
            if is_mine is not None and tile[1] != is_mine:
                continue  # a mine filter was requested and this tile fails it
            matches.append([(r, c), tile])
    return matches
def render_tile(tile):
    """
    Return the one-character display value of a tile.
    :param tile: [visible (bool), mine (bool), adjacent_mines (int)]
    :returns str value of tile
    """
    visible, mine, adjacent = tile
    if not visible:
        # masked tiles all look alike
        return "+"
    if mine:
        return "X"
    # revealed safe tile: its neighbor count, or blank for zero
    return str(adjacent) if adjacent else " "
# Handle player input
def select_space(game_board):
    """
    Get user input for space selection and apply it to the board.
    :param game_board: Used for determining bounds of board
    :return: (game_on, game_board) after applying the selection
    """
    max_row = len(game_board) - 1
    max_col = len(game_board[0]) - 1
    # BUG FIX: the row was previously validated against the column bound and
    # the column against the row bound, which broke non-square boards.
    row = verify_user_input("Enter Row:", lambda x: int(x), lambda x: 0 <= int(x) <= max_row)
    col = verify_user_input("Enter Column:", lambda x: int(x), lambda x: 0 <= int(x) <= max_col)
    game_on, game_board = update_board(game_board, row, col)
    return game_on, game_board
def verify_user_input(initial_prompt, cast_function, verify_function):
    """
    Repeatedly prompt the user until the input both casts and verifies.
    :param initial_prompt: string phrase to initially ask
    :param cast_function: cast input to required type (may raise on bad input)
    :param verify_function: function used to verify input (returns bool)
    :return: valid input
    """
    thing = raw_input(initial_prompt)
    while True:
        try:
            valid_thing = cast_function(thing)
        except Exception:
            # Deliberately broad: any cast failure just means "ask again".
            thing = raw_input("Invalid! Try Again:")
            continue
        if verify_function(valid_thing):
            return valid_thing
        # BUG FIX: a value that cast cleanly but failed verification used to
        # spin forever without re-prompting; ask the user again instead.
        thing = raw_input("Invalid! Try Again:")
def update_board(game_board, row, col):
    """
    Apply a tile selection to the game board.
    :param game_board: the game board
    :param row: user selected row
    :param col: user selected column
    :return: game_on, game_board
    """
    selected = game_board[row][col]
    if selected[1]:
        # Mine hit: reveal every mine on the board and end the game.
        for board_row in game_board:
            for t in board_row:
                if t[1]:
                    t[0] = True
        return False, game_board
    # Safe tile: reveal just the selected one.
    selected[0] = True
    if selected[2] == 0:
        # Blank tile: flood-fill reveal the connected blank region.
        game_board = pathfind(game_board, row, col)
    return True, game_board
def pathfind(game_board, row, col):
    """
    Breadth-first reveal of the blank region connected to the starting tile.
    :param game_board: the board
    :param row: starting row
    :param col: starting column
    :return: the updated board
    """
    # only hidden non-mine tiles ever need to be visited
    queue = get_neighbors(game_board, row, col, is_mine=False, is_visible=False)
    # every coordinate ever queued, so nothing is enqueued twice
    seen = set(coords for coords, _ in queue)
    while queue:
        (r, c), tile = queue.pop(0)
        # safe to reveal unconditionally -- mines are filtered out above
        # (the tile list is shared with the board, so this mutates in place)
        tile[0] = True
        if tile[2] == 0:
            # blank tile: expand the search to its unseen hidden neighbors
            fresh = [n for n in get_neighbors(game_board, r, c, is_mine=False, is_visible=False)
                     if n[0] not in seen]
            seen.update(coords for coords, _ in fresh)
            queue.extend(fresh)
    return game_board
def generate_board():
    """
    Build a fresh game board from user-supplied dimensions.
    :return: game_on, game_board
    """
    n_rows = verify_user_input("Enter number of rows:", lambda x: int(x), lambda x: x > 1)
    n_cols = verify_user_input("Enter number of columns:", lambda x: int(x), lambda x: x > 1)
    n_mines = verify_user_input("Enter number of mines:", lambda x: int(x), lambda x: x > 0)
    # a brand-new board always starts with the game running
    return True, create_game_board(n_rows, n_cols, mines=n_mines)
def check_victory(game_board):
    """
    Determine whether the player has won: every tile must be either
    revealed or a mine (i.e. no masked non-mine tiles remain).
    :param game_board: the game board
    :returns: bool
    """
    return all(visible or mine
               for row in game_board
               for visible, mine, _ in row)
def run_game():
    """
    Run the game: loop over complete rounds, asking after each round
    whether the player wants another one (exits the process otherwise).
    """
    while True:
        # banner for a new round
        print "".join(["-"] * 11)
        print "Minesweeper"
        game_on, game_board = generate_board()
        if game_on:
            render_board(game_board)
        # one round: keep taking selections until a mine ends the game
        while game_on:
            game_on, game_board = select_space(game_board)
            render_board(game_board)
            if not game_on:
                print "Sorry, you lose!"
            elif check_victory(game_board):
                print "Congratulations, you win!"
                # end the game
                game_on = False
        # check if they would like to play again
        play_again = raw_input("Would you like to play again? [y]es/[n]o:").lower()
        while True:
            if play_again in {"y", "yes"}:
                break
            elif play_again in {"n", "no"}:
                # exiting here still triggers the atexit farewell handler
                _sys.exit(0)
            else:
                play_again = raw_input("Invalid selection! [y]es/[n]o:").lower()
@_atexit.register
def say_goodbye():
    # Registered with atexit so the farewell is printed on any interpreter
    # exit, including the sys.exit(0) calls inside run_game.
    print ""
    print "Thanks for playing"
if __name__ == "__main__":
    try:
        run_game()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly instead of dumping a traceback.
        _sys.exit(0)
| {
"repo_name": "ajkim141/minesweeper-flask",
"path": "minesweeper.py",
"copies": "1",
"size": "11288",
"license": "mit",
"hash": -2069639437521127400,
"line_mean": 32.8978978979,
"line_max": 120,
"alpha_frac": 0.5948795181,
"autogenerated": false,
"ratio": 3.6828711256117455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4777750643711745,
"avg_score": null,
"num_lines": null
} |
"""AlexNet cnn model"""
import numpy as np
import tensorflow as tf
from model_utils import *
from model import Model
class AlexNet(Model):
    """AlexNet-style CNN expressed with TF1 placeholders and variables.

    NOTE(review): the parent ``Model`` class is assumed to populate
    ``self.params`` / ``self.weights`` / ``self.biases`` (used below before
    the training branch rebinds them) -- confirm against model.py.
    """

    def __init__(self, class_count, input_size_w=28,
                 input_size_h=None, params_path=None,
                 is_training=False):
        """Constructor.
        Instantiates initial params

        :param class_count: number of output classes (width of the logits)
        :param input_size_w: input width; also used as height when
            ``input_size_h`` is None (square input)
        :param input_size_h: optional input height
        :param params_path: forwarded to the parent ``Model``
        :param is_training: forwarded to :meth:`create`
        """
        super(AlexNet, self).__init__(params_path)
        # stored as [width, height]
        self.input_size = [input_size_w, input_size_h or input_size_w]
        self.class_count = class_count
        # probability for tf.nn.dropout, fed at session run time
        self.dropout_prob = tf.placeholder(tf.float32)
        self.create(self.params, is_training=is_training)

    def save_params(self, sess, filepath):
        """Evaluates model params and saves them to file.

        ``self.params`` is a 2-row array ([weights, biases]); each variable
        is evaluated to a concrete value before delegating the write to the
        parent class.
        """
        values = []
        for i in range(self.params.shape[0]):
            row_values = []
            for j in range(self.params.shape[1]):
                value = self.params[i][j].eval(sess)
                row_values.append(value)
            values.append(row_values)
        super(AlexNet, self).save_params(values, filepath)

    def create(self, params, is_training=False):
        """Build the inference graph (placeholders, conv/FC layers, logits)."""
        # Load model parameters if they exist by calling parent class
        super(AlexNet, self).create(params)
        # Input layer: flattened image; reshaped to single-channel 2D below
        self.x = tf.placeholder(tf.float32,
                                [None,self.input_size[0] * self.input_size[1]])
        x_img = tf.reshape(self.x, [-1,self.input_size[0],self.input_size[1],1])
        print('Input:', x_img.get_shape())
        # Conv2D layer 1 (5x5, 1 -> 32 channels, then 2x2 max pool)
        conv1_W = weights_var(value=self.weights[0], shape=[5,5,1,32])
        conv1_b = biases_var(value=self.biases[0], shape=[32])
        conv1 = relu(conv2D(x_img, conv1_W), conv1_b, name='conv1')
        pool1 = max_pool_2x2(conv1, name='pool1')
        print('Conv1:', pool1.get_shape())
        # Conv2D layer 2 (5x5, 32 -> 64 channels, then 2x2 max pool)
        conv2_W = weights_var(value=self.weights[1], shape=[5,5,32,64])
        conv2_b = biases_var(value=self.biases[1], shape=[64])
        conv2 = relu(conv2D(pool1, conv2_W), conv2_b, name='conv2')
        pool2 = max_pool_2x2(conv2, name='pool2')
        print('Conv2:', pool2.get_shape())
        # Conv2D layers 3-5 (5x5 convs widening to 512, one final max pool)
        conv3_W = weights_var(value=self.weights[2], shape=[5,5,64,128])
        conv3_b = biases_var(value=self.biases[2], shape=[128])
        conv3 = relu(conv2D(pool2, conv3_W), conv3_b, name='conv3')
        conv4_W = weights_var(value=self.weights[3], shape=[5,5,128,256])
        conv4_b = biases_var(value=self.biases[3], shape=[256])
        conv4 = relu(conv2D(conv3, conv4_W), conv4_b, name='conv4')
        conv5_W = weights_var(value=self.weights[4], shape=[5,5,256,512])
        conv5_b = biases_var(value=self.biases[4], shape=[512])
        conv5 = relu(conv2D(conv4, conv5_W), conv5_b, name='conv5')
        pool5 = max_pool_2x2(conv5, name='pool5')
        print('Conv5:', pool5.get_shape())
        # FC layer 6: spatial size after the three 2x2 pools (each halves
        # width/height) determines the flattened input width
        w = int(round(self.input_size[0] / 2 / 2 / 2))
        h = int(round(self.input_size[1] / 2 / 2 / 2))
        fc6_W = weights_var(value=self.weights[5], shape=[w*h*512,1024])
        fc6_b = biases_var(value=self.biases[5], shape=[1024])
        pool5_flat = tf.reshape(pool5, [-1,w*h*512])
        fc6 = fc_layer(pool5_flat, fc6_W, fc6_b, name='fc6')
        print('FC6:', fc6.get_shape())
        fc6_dropout = tf.nn.dropout(fc6, self.dropout_prob)
        # FC layer 7
        fc7_W = weights_var(value=self.weights[6], shape=[1024,512])
        fc7_b = biases_var(value=self.biases[6], shape=[512])
        fc7 = fc_layer(fc6_dropout, fc7_W, fc7_b, name='fc7')
        print('FC7:', fc7.get_shape())
        fc7_dropout = tf.nn.dropout(fc7, self.dropout_prob)
        # Logits layer (no activation; softmax presumably applied by the loss)
        logits_W = weights_var(value=self.weights[7], shape=[512,self.class_count])
        logits_b = biases_var(value=self.biases[7], shape=[self.class_count])
        self.logits = tf.matmul(fc7_dropout, logits_W, name='logits') + logits_b
        print('Logits:', self.logits.get_shape())
        # True labels (one-hot, fed at training time)
        self.labels = tf.placeholder(tf.float32, [None,self.class_count])
        # Loads/instantiates weights and biases: when training, rebind the
        # param arrays to the variables just created so save_params works
        if is_training:
            self.weights = np.array([conv1_W, conv2_W,
                                     conv3_W, conv4_W,
                                     conv5_W, fc6_W,
                                     fc7_W, logits_W])
            self.biases = np.array([conv1_b, conv2_b,
                                    conv3_b, conv4_b,
                                    conv5_b, fc6_b,
                                    fc7_b, logits_b])
            self.params = np.array([self.weights, self.biases])
def get_model(class_count, input_size_w=28, input_size_h=None,
              params_path=None, is_training=False):
    """Factory wrapper returning a configured AlexNet instance."""
    return AlexNet(class_count, input_size_w, input_size_h,
                   params_path, is_training=is_training)
def main():
    # Smoke test: build a 10-class model with the default 28x28 input size.
    my_model = AlexNet(10)

if __name__ == "__main__":
    main()
| {
"repo_name": "Lazea/TensorFlow",
"path": "models/alexnet.py",
"copies": "1",
"size": "4969",
"license": "apache-2.0",
"hash": -4367961704092285400,
"line_mean": 40.0661157025,
"line_max": 83,
"alpha_frac": 0.5636949084,
"autogenerated": false,
"ratio": 3.1251572327044026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9165687988281125,
"avg_score": 0.004632830564655437,
"num_lines": 121
} |
'''AlexNet for CIFAR10. FC layers are removed. Paddings are adjusted.
Without BN, the start learning rate should be 0.01
(c) YANG, Wei
'''
import torch.nn as nn
__all__ = ['alexnet']
class AlexNet(nn.Module):
    """AlexNet variant for small images (e.g. CIFAR10): conv-only trunk
    (FC layers removed, paddings adjusted) plus a single linear head."""

    def __init__(self, num_classes=10, nchannels=3):
        super(AlexNet, self).__init__()
        # Build the feature extractor as an explicit layer list first,
        # then wrap it in a Sequential.
        layers = [
            nn.Conv2d(nchannels, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.features = nn.Sequential(*layers)
        self.classifier = nn.Linear(256, num_classes)

    def forward(self, x):
        # conv trunk, flatten per sample, then the linear classifier
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
def alexnet(**kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    All keyword arguments are forwarded to :class:`AlexNet`.
    """
    return AlexNet(**kwargs)
| {
"repo_name": "google-research/understanding-curricula",
"path": "third_party/models/alexnet.py",
"copies": "1",
"size": "1380",
"license": "apache-2.0",
"hash": 3504049144795640300,
"line_mean": 30.3636363636,
"line_max": 74,
"alpha_frac": 0.5753623188,
"autogenerated": false,
"ratio": 3.143507972665148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9211294533889391,
"avg_score": 0.0015151515151515152,
"num_lines": 44
} |
"""AlexNet model."""
from athenet import Network
from athenet.layers import ConvolutionalLayer, ReLU, LRN, MaxPool, \
FullyConnectedLayer, Dropout, Softmax
from athenet.utils import load_data, get_bin_path
ALEXNET_FILENAME = 'alexnet_weights.pkl.gz'
def alexnet(trained=True, weights_filename=ALEXNET_FILENAME, weights_url=None):
    """Build the ImageNet AlexNet as an athenet ``Network``.

    :param trained: when True, pretrained weights are loaded into the net
    :param weights_filename: local filename of the pickled weights
    :param weights_url: optional URL the weights can be fetched from
    :raises Exception: when ``trained`` is True and no weights can be loaded
    :return: the assembled (and optionally weight-initialized) ``Network``
    """
    if trained:
        weights = load_data(get_bin_path(weights_filename), weights_url)
        if weights is None:
            raise Exception("cannot load AlexNet weights")

    # Normalization parameters shared by both LRN layers
    local_range = 5
    alpha = 0.0001
    beta = 0.75
    k = 1

    net = Network([
        # conv1: 11x11/4 on 227x227x3 input, followed by LRN and 3x3/2 pool
        ConvolutionalLayer(image_shape=(227, 227, 3),
                           filter_shape=(11, 11, 96),
                           stride=(4, 4)),
        ReLU(),
        LRN(local_range=local_range,
            alpha=alpha,
            beta=beta,
            k=k),
        MaxPool(poolsize=(3, 3),
                stride=(2, 2)),
        # conv2: grouped (2 groups) 5x5 conv, LRN, pool
        ConvolutionalLayer(filter_shape=(5, 5, 256),
                           padding=(2, 2),
                           n_groups=2),
        ReLU(),
        LRN(local_range=local_range,
            alpha=alpha,
            beta=beta,
            k=k),
        MaxPool(poolsize=(3, 3),
                stride=(2, 2)),
        # conv3-conv5: 3x3 convs (conv4/conv5 grouped), then final pool
        ConvolutionalLayer(filter_shape=(3, 3, 384),
                           padding=(1, 1)),
        ReLU(),
        ConvolutionalLayer(filter_shape=(3, 3, 384),
                           padding=(1, 1),
                           n_groups=2),
        ReLU(),
        ConvolutionalLayer(filter_shape=(3, 3, 256),
                           padding=(1, 1),
                           n_groups=2),
        ReLU(),
        MaxPool(poolsize=(3, 3),
                stride=(2, 2)),
        # classifier head: fc6/fc7 with dropout, fc8 + softmax (1000 classes)
        FullyConnectedLayer(4096),
        ReLU(),
        Dropout(),
        FullyConnectedLayer(4096),
        ReLU(),
        Dropout(),
        FullyConnectedLayer(1000),
        Softmax()
    ])
    if trained:
        net.set_params(weights)
    return net
| {
"repo_name": "heurezjusz/Athenet",
"path": "athenet/models/alexnet.py",
"copies": "2",
"size": "2032",
"license": "bsd-2-clause",
"hash": 6651648401164641000,
"line_mean": 28.8823529412,
"line_max": 79,
"alpha_frac": 0.4970472441,
"autogenerated": false,
"ratio": 3.89272030651341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5389767550613409,
"avg_score": null,
"num_lines": null
} |
""" AlexNet
Dataset classification task: 28x28x3 shape (adjusted AlexNet)
References:
ImageNet Classification with Deep Convolutional Neural Networks.
K. Simonyan, A. Zisserman. arXiv technical report, 2014.
Links:
http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
"""
from __future__ import division, print_function, absolute_import
from .model_helper import transfer_params_decode, define_layers
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import local_response_normalization
keep_prob = 0.5
# Building 'AlexNet Network'
def build_alexnet(learning_rate, n_class=10):
    """Assemble the trainable tflearn AlexNet graph.

    Input is a 28x28x3 image batch; output is an ``n_class`` softmax,
    trained with RMSProp and categorical cross-entropy.
    """
    net = input_data(shape=[None, 28, 28, 3], name='input')
    net = conv_2d(net, 12, 11, strides=4, activation='relu', scope='conv1')
    net = max_pool_2d(net, 3, strides=2, name='maxpool1')
    net = local_response_normalization(net)
    net = conv_2d(net, 32, 5, activation='relu', scope='conv2')
    net = max_pool_2d(net, 3, strides=2, name='maxpool2')
    net = local_response_normalization(net)
    net = conv_2d(net, 48, 3, activation='relu', scope='conv3_1')
    net = conv_2d(net, 48, 3, activation='relu', scope='conv3_2')
    net = conv_2d(net, 32, 3, activation='relu', scope='conv3_3')
    net = max_pool_2d(net, 3, strides=2, name='maxpool3')
    net = local_response_normalization(net)
    net = fully_connected(net, 512, activation='tanh', scope='fc1')
    net = dropout(net, keep_prob, name='dropout1')
    net = fully_connected(net, 512, activation='tanh', scope='fc2')
    net = dropout(net, keep_prob, name='dropout2')
    net = fully_connected(net, n_class, activation='softmax', scope='fc3')
    # attach the training op
    net = regression(net,
                     optimizer='rmsprop',
                     loss='categorical_crossentropy',
                     learning_rate=learning_rate)
    return net
def alexnet(input, num_class, transf_params_encoded=None):
    """Assemble the transfer-learning variant of the AlexNet graph.

    Each parameterized layer receives a ``(restore, trainable)`` pair from
    the decoded ``transf_params``: ``restore`` controls whether the layer's
    weights are reloaded from a checkpoint, ``trainable`` whether it is
    fine-tuned or frozen.

    :param input: input tensor (presumably 28x28x3 as in build_alexnet --
        confirm against callers)
    :param num_class: number of output classes
    :param transf_params_encoded: encoded per-layer transfer flags; defaults
        to ``define_layers(8)``, one entry per parameterized layer
    """
    if transf_params_encoded is None:
        transf_params_encoded = define_layers(8)
    transf_params = transfer_params_decode(transf_params_encoded)
    # layers 0-4: the convolutional trunk
    network = conv_2d(input, 12, 11, strides=4, activation='relu', scope='conv1', restore=transf_params[0][0],
                      trainable=transf_params[0][1])
    network = max_pool_2d(network, 3, strides=2, name='maxpool1')
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 5, activation='relu', scope='conv2', restore=transf_params[1][0],
                      trainable=transf_params[1][1])
    network = max_pool_2d(network, 3, strides=2, name='maxpool2')
    network = local_response_normalization(network)
    network = conv_2d(network, 48, 3, activation='relu', scope='conv3_1', restore=transf_params[2][0],
                      trainable=transf_params[2][1])
    network = conv_2d(network, 48, 3, activation='relu', scope='conv3_2', restore=transf_params[3][0],
                      trainable=transf_params[3][1])
    network = conv_2d(network, 32, 3, activation='relu', scope='conv3_3', restore=transf_params[4][0],
                      trainable=transf_params[4][1])
    network = max_pool_2d(network, 3, strides=2, name='maxpool3')
    network = local_response_normalization(network)
    # layers 5-7: the fully connected classifier head
    network = fully_connected(network, 512, activation='tanh', scope='fc1', restore=transf_params[5][0],
                              trainable=transf_params[5][1])
    network = dropout(network, keep_prob, name='dropout1')
    network = fully_connected(network, 512, activation='tanh', scope='fc2', restore=transf_params[6][0],
                              trainable=transf_params[6][1])
    network = dropout(network, keep_prob, name='dropout2')
    network = fully_connected(network, num_class, activation='softmax', scope='fc3', restore=transf_params[7][0],
                              trainable=transf_params[7][1])
return network | {
"repo_name": "migueldsw/TL-DA-TF",
"path": "NETWORKS/alexnet.py",
"copies": "1",
"size": "4145",
"license": "apache-2.0",
"hash": -7972507101517669000,
"line_mean": 54.28,
"line_max": 113,
"alpha_frac": 0.6728588661,
"autogenerated": false,
"ratio": 3.49789029535865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9653957536737885,
"avg_score": 0.003358324944153028,
"num_lines": 75
} |
""" Algebraic formalism for hard objects
Hard objects are defined as products of ``eta_i`` where ``i`` is
the index of a node of the graph on which the hard object is defined.
The ``eta`` elements are commuting and nilpotent.
They satisfy
``<eta_{i_1} ...eta_{i_k}> = 1``
if ``i_1,..,i_k`` are all different.
The generating function for counting hard objects is
``Z(t) = <Prod (1 + t*Prod eta_i)>``
"""
from densearith import (dup_lshift, dup_add, dup_mul, dup_mul_ground)
from densearith import dup_degree, dup_strip, dup_lshift
from active_nodes import ip_ordered_vertices, ip_list_objects_from_vlist
from compatibility import iteritems
from domains import ZZ
# popcount of each 3-bit value: 0b000 .. 0b111
_bits_set_table = [0, 1, 1, 2, 1, 2, 2, 3 ]
def count_bits_set(n):
    """
    count the number of set bits in a (non-negative) number
    """
    # bin(n) is '0b...' so counting '1' characters gives the popcount
    return bin(n).count('1')
def _is_free_var(i, k, m):
"""
i current row
k index of the variable
m matrix as a list of lists
returns true if the variable `k` does not occur from row `i` on
"""
for j in range(i, len(m)):
if m[j][k] != 0:
return False
return True
def _monom(n):
"""
monomial in `eta` variables from the number `n` encoding it
"""
v = []
i = 0
while n:
if n % 2:
v.append(i)
n = n >> 1
i += 1
return v
def _prm_mul(p1, p2, free_vars_indices, K):
    """
    helper function for dup_permanental_minor_poly

    Return the product of ``p1`` and ``p2``

    Parameters
    ==========

    p1 : polynomial
    p2 : linear polynomial
    free_vars_indices : list of variables not used anymore

    Notes
    =====

    ``p1`` is a polynomial in the ``x_i=t*eta_i`` elements, with polynomials
    in ``t`` as coefficients;
    ``p2`` is a linear polynomial in the ``x_i`` elements, with numbers
    as coefficients.
    If after the product an ``eta-object``
    is not anymore used, it is replaced by the variable ``t``

    Examples
    ========

    ``p1 = (1 + 2*t) + (1+t)*x_0; p2 = 1 + x_0 + x_1``
    ``<p1*p2> = _prm_mul(p1, p2, [0, 1], ZZ)``

    >>> from hobj import _prm_mul
    >>> from domains import ZZ
    >>> p1 = {0:[2, 1], 1:[1, 1]}
    >>> p2 = {0:1, 1:1, 2:1}
    >>> _prm_mul(p1, p2, [0, 1], ZZ)
    {0: [1, 6, 5, 1]}
    """
    p = {}
    # bitmask of the eta variables that can be integrated out after this step
    mask_free = 0
    for i in free_vars_indices:
        mask_free += 1 << i
    if not p2:
        return p
    get = p.get
    for exp1, v1 in iteritems(p1):
        for exp2, v2 in p2.items():
            if exp1 & exp2:
                # eta elements are nilpotent: a shared index kills the term
                continue
            exp = exp1 | exp2
            # scale the coefficient polynomial by the numeric coefficient v2
            v = dup_mul_ground(v1, v2, K)
            if exp & mask_free:
                # integrate out finished eta variables: each becomes a factor
                # of t, i.e. a left shift of the coefficient polynomial
                for i in free_vars_indices:
                    if exp & (1 << i):
                        exp = exp ^ (1 << i)
                        v = dup_lshift(v, 1, K)
            p[exp] = dup_add(get(exp, []), v, K)
    return p
def _prm_mul_val(p1, p2, free_vars_indices, val):
p = {}
mask_free = 0
for i in free_vars_indices:
mask_free += 1 << i
if not p2:
return p
get = p.get
for exp1, v1 in iteritems(p1):
for exp2, v2 in p2.items():
if exp1 & exp2:
continue
exp = exp1 | exp2
v = v1*v2
if exp & mask_free:
for i in free_vars_indices:
if exp & (1 << i):
exp = exp ^ (1 << i)
v = v*val
p[exp] = get(exp, 0) + v
return p
def _get_poly_from_list(a, K):
"""
polynomial ``1 + sum eta_i a[i]``
"""
p = {0: K.one}
for i in range(len(a)):
if a[i]:
p[1<<i] = a[i]
return p
def _dup_permanental_minor_poly_val(m, K, val):
    """
    Evaluate the permanental-minor polynomial of matrix ``m`` at ``val``
    by multiplying in the row polynomials one at a time.
    """
    nrows = len(m)
    ncols = len(m[0])
    p = _get_poly_from_list(m[0], K)
    retired = set()
    for i in range(1, nrows):
        row_poly = _get_poly_from_list(m[i], K)
        # columns that never appear again can be integrated out now
        newly_free = []
        for j in range(ncols):
            if j not in retired and _is_free_var(i + 1, j, m):
                newly_free.append(j)
                retired.add(j)
        p = _prm_mul_val(p, row_poly, newly_free, val)
    # every eta must have been integrated out, leaving the constant term
    assert len(p) == 1
    return p[0]
def dup_permanental_minor_poly(m, K, val=None):
    """
    return the polynomial of the sum of permanental minors of a matrix ``m``

    Let ``perm(i, m)`` be the sum of the permanents
    of all ``i x i`` minors of ``m``
    The polynomial of the sum of permanental minors is
    ``sum_{i=0}^{len(m)-1} perm(i, m)*x**i``

    Parameters
    ==========

    m : matrix in list form
    val : value at which the polynomial is evaluated

    Examples
    ========

    >>> from hobj import dup_permanental_minor_poly
    >>> from domains import ZZ
    >>> m = [[2,1,2],[3,0,1],[1,1,2]]
    >>> dup_permanental_minor_poly(m, ZZ)
    [15, 36, 13, 1]
    >>> dup_permanental_minor_poly(m, ZZ, 1)
    65
    """
    if val is not None:
        # scalar evaluation avoids carrying coefficient polynomials around
        return _dup_permanental_minor_poly_val(m, K, val)
    n = len(m)
    ny = len(m[0])
    # start from the constant polynomial 1 (coefficient lists in t)
    p = {0:[K.one]}
    done_vars = set()
    for i in range(n):
        # linear polynomial 1 + sum_j m[i][j] * eta_j for the current row
        p1 = {0: K.one}
        a = m[i]
        for j in range(len(a)):
            if a[j]:
                p1[1<<j] = a[j]
        # columns that never occur in later rows can be integrated out
        free_vars_indices = []
        for j in range(ny):
            if j in done_vars:
                continue
            r = _is_free_var(i+1, j, m)
            if r:
                free_vars_indices.append(j)
                done_vars.add(j)
        p = _prm_mul(p, p1, free_vars_indices, K)
    # all eta variables integrated out: only the constant term remains
    assert len(p) == 1
    nv = [y for y in p[0]]
    return nv
def _get_num_elements(objects):
""""
number of elements in `objects`
"""
s = set()
for t in objects:
for i in t:
s.add(i)
nvars = len(s)
b = list(sorted(s))
b.sort()
if b != list(range(nvars)):
raise ValueError('elements should be labelled in 0,...,nvars-1')
return nvars
def _poly_str(a):
    """
    string representation of a univariate polynomial in ``t``
    (dense list form, highest-degree coefficient first)
    """
    n = dup_degree(a)
    v = []
    for i in range(n + 1):
        c = a[i]
        if not c:
            continue
        # a[i] multiplies t**(n - i)
        if i == n:
            m = '1'
        elif i == n - 1:
            m = 't'
        else:
            m = 't^%d' % (n - i)
        if c > 0:
            if m != '1':
                if c == 1:
                    if i == n:
                        sx = '%s' % m
                    else:
                        sx = '+%s' % m
                else:
                    sx = '+%s*%s' % (c, m)
            else:
                sx = '+%s' % c
        else:
            # BUG FIX: this branch used "'%s*m' % (c, m)" -- one placeholder
            # for two arguments -- raising TypeError for any negative
            # coefficient. Format the negative term like the positive case.
            if m != '1':
                sx = '%s*%s' % (c, m)
            else:
                sx = '%s' % c
        v.append(sx)
    s = ''.join(v)
    return s
def hobj_str(p, noval=True):
    """
    string representation of polynomial for hard objects

    Parameters
    ==========

    p : polynomial for hard objects
    noval : if True the coefficients are put to ``1``
    """
    pieces = []
    # order terms by number of eta factors, then by the factor indices
    terms = sorted(p.items(), key=lambda kv: (count_bits_set(kv[0]), _monom(kv[0])))
    for expv, coeff in terms:
        if expv == 0:
            # constant term
            pieces.append('1' if noval else _poly_str(coeff))
        else:
            names = '*'.join('x%d' % idx for idx in _monom(expv))
            if noval:
                pieces.append(names)
            else:
                pieces.append('%s*(%s)' % (names, _poly_str(coeff)))
    return ' + '.join(pieces)
def hobj_list(p):
    """
    return the list of lists representing independent sets

    Examples
    ========

    >>> from hobj import gen_hobj, hobj_list
    >>> p = gen_hobj([(0,1),(1,2),(2,3),(3,4),(0,4)])
    >>> hobj_list(p)
    [[], [0], [1], [2], [3], [4], [0, 2], [0, 3], [1, 3], [1, 4], [2, 4]]
    """
    # same ordering as hobj_str: by set size, then by member indices
    ordered = sorted(p.items(), key=lambda kv: (count_bits_set(kv[0]), _monom(kv[0])))
    return [_monom(expv) for expv, _ in ordered]
def _gen_hobj_mul(p1, p2, free_vars_indices, K):
"""
helper for gen_hobj
"""
p = {}
mask_free = 0
for i in free_vars_indices:
mask_free += 1 << i
if not p2:
return p
get = p.get
for exp1, v1 in iteritems(p1):
for exp2, v2 in p2.items():
if exp1 & exp2:
continue
exp = exp1 | exp2
v = v1*v2
if exp & mask_free:
for i in free_vars_indices:
if exp & (1 << i):
exp = exp ^ (1 << i)
p[exp] = get(exp, 0) + v
return p
def gen_hobj(objects, vlist=None):
    """
    polynomial enumerating all hard object configurations

    Parameters
    ==========

    objects : list of tuple of object element indices
    vlist: list of object indices, a permutation of ``range(len(vlist))``

    Notes
    =====

    ``vlist`` relabels which bit represents each object variable ``x_i``;
    when omitted, object ``i`` gets bit ``i``.

    Examples
    ========

    >>> from hobj import gen_hobj, hobj_str
    >>> hobj_str(gen_hobj([(0,1),(1,2),(2,3),(3,4),(0,4)]))
    '1 + x0 + x1 + x2 + x3 + x4 + x0*x2 + x0*x3 + x1*x3 + x1*x4 + x2*x4'
    """
    from domains import ZZ
    nvars = _get_num_elements(objects)
    masks = []
    done_vars = set()
    one = 1
    p = {0:one}
    n = len(objects)
    if not vlist:
        vlist = list(range(n))
    else:
        assert list(sorted(vlist)) == list(range(n))
    # use the bits up to `n` for the hard object variables;
    # use the subsequent bits for the `eta` elements
    for i in range(n):
        expv = 0
        for j in objects[i]:
            expv = expv + (1 << (j+n))
        masks.append(expv)
    for i in range(n):
        # p1 = 1 + x_{vlist[i]} * eta(objects[i])
        p1 = {0: one}
        expv = masks[i] + (1 << vlist[i])
        p1[expv] = one
        # eta elements that occur in no later object can be integrated out
        free_vars = set()
        for j in range(n, n + nvars):
            if j in done_vars:
                continue
            hit = False
            jmask = 1 << j
            for mask in masks[i + 1:]:
                if mask & jmask:
                    hit = True
                    break
            if not hit:
                done_vars.add(j)
                free_vars.add(j)
        p = _gen_hobj_mul(p, p1, free_vars, ZZ)
    return p
def obj_free(objects):
    """
    return a list of tuples ``(obj, free)``, where ``free`` lists the
    element indices whose last occurrence is in ``obj``'s position
    """
    nvars = _get_num_elements(objects)
    result = []
    retired = set()
    n = len(objects)
    for i, obj in enumerate(objects):
        free = []
        for j in range(nvars):
            if j in retired:
                continue
            # element j is free here iff it appears in no later object
            if any(j in objects[k] for k in range(i + 1, n)):
                continue
            free.append(j)
            retired.add(j)
        result.append((obj, free))
    return result
class Hobj(object):
    """
    Incremental builder of hard-object counting polynomials.

    Used with iadd_object / iadd_object_val: each call multiplies the
    running polynomial by ``(1 + t*val*eta_{i_1}*...*eta_{i_k})`` for one
    object, recycling the bit positions of eta elements that get
    integrated out.

    TODO: use it also with _monom, hobj_str; then put these functions as methods
    """
    def __init__(self, pr=None):
        # objects already multiplied in, stored as sorted tuples
        self.links = []
        # element index -> bit position currently holding its eta variable
        self.dt = {}
        # pool of reusable bit positions (popped from the end, so 0 first)
        self.freedt = list(range(1000, -1, -1))
        # optional prime modulus for modular evaluation
        self.pr = pr

    def hobj_str(hob, p, noval=True):
        """
        string representation of polynomial for hard objects

        Parameters
        ==========

        p : polynomial for hard objects
        noval : if True the coefficients are put to ``1``
        """
        # invert the element -> bit mapping so terms use original labels
        dtinv = {}
        for i, j in iteritems(hob.dt):
            dtinv[j] = i
        a = []
        # BUG FIX: dict.items() is a view in Python 3 and has no .sort();
        # sort a list instead (as the module-level hobj_str already does).
        items = sorted(p.items(), key=lambda k: (count_bits_set(k[0]), _monom(k[0])))
        for exp1, v1 in items:
            if exp1 == 0:
                if noval:
                    a.append('1')
                else:
                    s = _poly_str(v1)
                    a.append(s)
            else:
                t = _monom(exp1)
                t = [dtinv[i] for i in t]
                sx = '*'.join(['x%d' % ii for ii in t])
                if noval:
                    a.append('%s' % sx)
                else:
                    s = _poly_str(v1)
                    a.append('%s*(%s)' %(sx, s))
        s = ' + '.join(a)
        return s

    def hobj_list(hob, p):
        """
        return the list of lists representing independent sets,
        with eta bit positions translated back to element labels
        """
        dtinv = {}
        for i, j in iteritems(hob.dt):
            dtinv[j] = i
        # BUG FIX: same Python 3 .items().sort() problem as in hobj_str.
        items = sorted(p.items(), key=lambda k: (count_bits_set(k[0]), _monom(k[0])))
        a = [[dtinv[i] for i in _monom(expv)] for expv, y in items]
        return a

    def iadd_object(hb, p, val, obj, free, K):
        """
        multiply ``p`` by ``(1 + t*val*eta_i*eta_j)``

        Notes
        =====

        ``p`` is changed only if ``free`` is empty
        ``free`` is the list of indices of ``eta`` elements which
        are integrated (that is, put to ``1`` after performing the product).

        Examples
        ========

        >>> from domains import ZZ
        >>> from hobj import Hobj, obj_free
        >>> p = {0: [ZZ.one]}
        >>> hb = Hobj()
        >>> a = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
        >>> a = obj_free(a); a
        [((0, 1), []), ((1, 2), [1]), ((2, 3), [2]), ((3, 4), [3]), ((4, 0), [0, 4])]
        >>> for t, free in a:
        ...     p = hb.iadd_object(p, 1, t, free, ZZ)
        ...
        >>> p[0]
        [5, 5, 1]
        """
        links = hb.links
        dt = hb.dt
        freedt = hb.freedt
        # build the eta bitmask of the object, allocating bits on demand
        exp2 = 0
        for i in obj:
            if i in dt:
                j = dt[i]
            else:
                j = freedt.pop()
                dt[i] = j
            exp2 += 1 << j
        # translate free element indices to their current bit positions
        free = [dt[i] for i in free]
        t = tuple(sorted(obj))
        if t in links:
            raise ValueError('%s in %s' %(t, links))
        links.append(t)
        if free:
            p1 = p
            p = {}
            mask_free = 0
            for i in free:
                mask_free += 1 << i
            get = p.get
            for exp1, v1 in iteritems(p1):
                # ii == 0 keeps the old term; ii == 1 adds the term
                # multiplied by t*val*eta(obj)
                for ii in range(2):
                    if not ii:
                        exp = exp1
                        v = v1
                    else:
                        if exp1 & exp2:
                            # nilpotency: repeated eta index kills the term
                            continue
                        exp = exp1 | exp2
                        if val != 1:
                            v1 = dup_mul_ground(v1, val, K)
                        v = dup_lshift(v1, 1, K)
                    if exp & mask_free:
                        # integrate out eta variables not used any more
                        for i in free:
                            if exp & (1 << i):
                                exp = exp ^ (1 << i)
                    p[exp] = dup_add(get(exp, []), v, K)
            # the freed bit positions can be recycled by later objects
            for exp in free:
                freedt.append(exp)
            return p
        else:
            # in-place update; brand-new keys are collected separately so
            # the dict is never grown while being iterated
            a = []
            for exp1, v1 in iteritems(p):
                if exp1 & exp2:
                    continue
                exp = exp1 | exp2
                v1 = dup_mul_ground(v1, val, K)
                v = dup_lshift(v1, 1, K)
                try:
                    p[exp] = dup_add(p[exp], v, K)
                except KeyError:
                    a.append((exp, v))
            for exp, v in a:
                p[exp] = v
            return p

    def iadd_object_val(hb, p, val, obj, free, K, pr=None):
        """
        multiply ``p`` by ``(1 + t*val*eta_i*eta_j)``, with the polynomial
        evaluated at a point (scalar coefficients instead of poly lists)

        Notes
        =====

        ``p`` is in general changed.
        ``free`` is the list of indices of ``eta`` elements which
        are integrated (that is, put to ``1`` after performing the product).
        When ``pr`` is given, coefficients are reduced modulo ``pr``.
        """
        links = hb.links
        dt = hb.dt
        freedt = hb.freedt
        exp2 = 0
        for i in obj:
            if i in dt:
                j = dt[i]
            else:
                j = freedt.pop()
                dt[i] = j
            exp2 += 1 << j
        free = [dt[i] for i in free]
        t = tuple(sorted(obj))
        if t in links:
            raise ValueError('%s in %s' %(t, links))
        links.append(t)
        if free:
            p1 = p
            p = {}
            mask_free = 0
            for i in free:
                mask_free += 1 << i
            get = p.get
            for exp1, v1 in iteritems(p1):
                for ii in range(2):
                    if not ii:
                        exp = exp1
                        v = v1
                    else:
                        if exp1 & exp2:
                            continue
                        exp = exp1 | exp2
                        if val != 1:
                            # when val == 1, v still holds v1 from the
                            # ii == 0 pass, which is already the right value
                            v = v1*val
                    if exp & mask_free:
                        for i in free:
                            if exp & (1 << i):
                                exp = exp ^ (1 << i)
                    c = get(exp, 0) + v
                    if pr:
                        c = c % pr
                    p[exp] = c
            for exp in free:
                freedt.append(exp)
            return p
        else:
            # in-place update with deferred insertion of new keys
            a = []
            for exp1, v1 in iteritems(p):
                if exp1 & exp2:
                    continue
                exp = exp1 | exp2
                v = v1 * val
                if pr:
                    v = v % pr
                try:
                    p[exp] += v
                except KeyError:
                    a.append((exp, v))
            for exp, v in a:
                p[exp] = v
            return p
def d_relabel(d):
    """
    Relabel the keys of the graph dict ``d`` to ``0,...,len(d)-1``.

    :param d: dict mapping a vertex to the list of its neighbours
    :return: (relabelled dict, mapping old key -> new key)
    """
    # FIX: removed the dead `dt = {}` that was immediately rebound.
    dt = dict(zip(d.keys(), range(len(d))))
    d1 = {}
    for k, v in d.items():
        # translate both the vertex and its neighbour list
        d1[dt[k]] = [dt[i] for i in v]
    return d1, dt
def dup_gen_count_hobj(objects, K, val=None, pr=None):
    """
    Counting polynomial for hard object from a list of edges

    Parameters
    ==========

    objects : list of tuples of element indices
    val : optional evaluation point; when given, a number is returned
        instead of a coefficient list
    pr : optional prime modulus (only valid together with ``val``)

    Notes
    =====

    The counting polynomial for hard object is
    ``M(x) = sum_i N(i)*x**i``
    where ``N(i)`` is the number of ways in which ``i`` dimers can be
    put in the graph.
    In the case of dimers it is the matching generating polynomial, which
    is related to the matching polynomial ``mu`` by the relation
    ``mu(x) = x**v*M(-x**2)``
    where ``v`` is the number of vertices of the graph.
    For efficiency reasons the list of edges should be appropriately
    ordered to reduce the number of active nodes.

    Examples
    ========

    >>> from domains import ZZ
    >>> from hobj import dup_gen_count_hobj
    >>> dup_gen_count_hobj([(1, 0), (2, 1), (3, 2), (4, 0), (4, 3)], ZZ)
    [5, 5, 1]
    >>> dup_gen_count_hobj([(1, 0), (2, 1), (3, 2), (4, 0), (4, 3)], ZZ, val=1)
    11
    """
    # pair each object with the elements it retires (last occurrence)
    a = obj_free(objects)
    hb = Hobj(pr=pr)
    if val is None:
        if pr:
            # modular reduction is only implemented for scalar evaluation
            raise NotImplementedError
        # polynomial coefficients: start from the constant polynomial 1
        p = {0: [K.one]}
        for obj, free in a:
            p = hb.iadd_object(p, 1, obj, free, K)
    else:
        # scalar evaluation at `val`
        p = {0: K.one}
        for obj, free in a:
            p = hb.iadd_object_val(p, val, obj, free, K, pr)
    # all eta variables integrated out: only the constant term remains
    assert len(p) == 1
    return p[0]
def dup_matching_generating_poly(d, val=None, pr=None, links=None, K=ZZ):
    """
    Return the matching polynomial for the graph defined by ``d``

    Parameters
    ==========

    d : dict for the graph
    val : evaluate the polynomial in ``val``
    pr : evaluate the polynomial modulo the prime ``pr``
    links : list of edges of the graph

    Notes
    =====

    The dictionary ``d`` associates to each vertex of the graph the list
    of its neighbours.
    A simple greedy algorithm tries to find an efficient ordering of
    vertices to compute the independence polynomial.
    For graphs which are not biconnected, in general this greedy
    algorithm does not work.
    For biconnected graphs, there is no guarantee that an efficient
    ordering of the links is found.
    In either case, one can provide explicitly the parameter `links`.

    Examples
    ========

    >>> from hobj import dup_matching_generating_poly
    >>> d = {0:[1,4], 1:[0,2], 2:[1,3], 3:[2,4], 4:[0,3]}
    >>> dup_matching_generating_poly(d)
    [5, 5, 1]
    >>> d = {0:[4], 1:[2,4], 2:[1,3], 3:[2,4], 4:[0,1,3]}
    >>> links = [(0,4),(1,4),(3,4),(1,2),(2,3)]
    >>> dup_matching_generating_poly(d, links=links)
    [4, 5, 1]
    """
    from active_nodes import ordered_links
    # normalize vertex labels to 0..n-1 if needed, relabeling links too
    if list(sorted(d.keys())) != list(range(len(d))):
        d, dt = d_relabel(d)
        if links:
            links = [[dt[k] for k in obj] for obj in links]
    if not links:
        # no hint given: seed the greedy ordering with an arbitrary edge
        k0 = 0
        links = [k0, d[k0][0]]
        ord_links = ordered_links(d, *links)
    else:
        ord_links = links
    # sanity check: the ordering must cover every edge exactly once
    num_edges = sum([len(v) for v in d.values()]) // 2
    if num_edges != len(ord_links):
        raise ValueError('wrong number of links')
    p = dup_gen_count_hobj(ord_links, K, val, pr)
    return p
def dup_independence_poly(d, val=None, pr=None, links=None, vlist=None, K=ZZ):
    """
    Return the independence polynomial for the graph defined by ``d``

    Parameters
    ==========
    d : dict for the graph (vertex -> list of its neighbours)
    val : evaluate the polynomial in ``val``
    pr : evaluate the polynomial modulo the prime ``pr``
    links : list of vertices of the graph forming a path, used to seed
        the greedy vertex-ordering heuristic
    vlist : explicit ordering of all the vertices (overrides the heuristic)

    Notes
    =====
    A greedy algorithm tries to find an efficient vertex ordering; the
    cost of the computation grows as ``2**nu`` in the number of active
    nodes ``nu``, so a good ordering matters.  For graphs which are not
    biconnected the heuristic can fail; pass ``vlist`` explicitly (or,
    for biconnected graphs, a short seed path ``links`` -- e.g. a short
    side of a long rectangle) in that case.

    Raises
    ======
    ValueError : if ``vlist`` does not contain all the vertices

    Examples
    ========
    >>> from hobj import dup_independence_poly
    >>> d = {0:[1,3], 1:[0,2], 2:[1,3], 3:[0,2]}
    >>> dup_independence_poly(d)
    [2, 4, 1]
    """
    # (fix: removed the unused local ``bd = True`` that was dead code)
    if sorted(d.keys()) != list(range(len(d))):
        # Canonicalize the vertex labels to 0..n-1.
        # NOTE(review): unlike dup_matching_generating_poly, ``links`` and
        # ``vlist`` are not translated through the relabelling map here --
        # confirm callers of relabelled graphs pass canonical labels.
        d, _dt = d_relabel(d)
    if not vlist:
        if not links:
            # Default seed path: vertex 0 and its first neighbour.
            k0 = 0
            links = [k0, d[k0][0]]
        vlist = ip_ordered_vertices(d, *links)
    if len(d) != len(vlist):
        raise ValueError('vlist has not all the vertices of the graph')
    objects = ip_list_objects_from_vlist(d, vlist)
    return dup_gen_count_hobj(objects, K, val, pr)
def independent_sets_gen(d):
    """
    Yield the independent vertex sets of the graph with adjacency dict ``d``.
    """
    order = ip_ordered_vertices(d)
    poly = gen_hobj(ip_list_objects_from_vlist(d, order), order)
    for expv, _coeff in iteritems(poly):
        yield _monom(expv)
def independence_sets(d):
    """
    Return the list of independent vertex sets of the graph with dict ``d``.

    Notes
    =====
    ``d`` maps every vertex of the graph to the list of its neighbours.

    Examples
    ========
    >>> from hobj import independence_sets
    >>> d = {0:[1,3], 1:[0,2], 2:[1,3], 3:[0,2]}
    >>> independence_sets(d)
    [[], [0], [1], [2], [3], [0, 2], [1, 3]]
    """
    order = ip_ordered_vertices(d)
    hp = gen_hobj(ip_list_objects_from_vlist(d, order), order)
    return hobj_list(hp)
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above.
    import sys
    import doctest
    if sys.version_info < (2, 6):
        # Fraction (used by the doctests) first appeared in Python 2.6.
        print('doctests require Fraction, available from Python2.6')
        sys.exit()
    doctest.testmod()
| {
"repo_name": "pernici/hobj",
"path": "src/hobj.py",
"copies": "1",
"size": "24509",
"license": "bsd-3-clause",
"hash": 353776980326510900,
"line_mean": 26.2322222222,
"line_max": 85,
"alpha_frac": 0.4682361581,
"autogenerated": false,
"ratio": 3.4393769295537466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44076130876537467,
"avg_score": null,
"num_lines": null
} |
'''algebraic relationship framework using multimethods
'''
from __future__ import absolute_import
from functools import partial
try:
from numpy import number as numpy_number_type
except ImportError:
numpy_number_type = None
from jamenson.runtime.multimethod import MultiMethod, defmethod, defboth_wrapper
from jamenson.runtime.atypes import anytype, as_optimized_type
from jamenson.runtime.func import identity
# Public names exported by ``from algebra import *``; the per-operation
# mm_* multimethod names are appended below as they are generated.
__all__ = '''
unop_names binop_names AlgebraBase scalar_number_type
'''.split()

# Matches any plain Python scalar (and numpy scalars when numpy is
# available; filter(None, ...) drops the placeholder when it is not).
scalar_number_type = as_optimized_type(tuple(filter(None, (int,long,float,numpy_number_type))))

# Unary operations: each gets an mm_<name> multimethod and a __<name>__ wrapper.
unop_names = '''
neg pos float abs
'''.split()
__all__ += unop_names

# Binary operations: each gets mm_<name>, __<name>__ and __r<name>__.
# NOTE(review): 'or' and 'and' here generate __or__/__and__, i.e. the
# bitwise | and & operators -- the boolean keywords have no dunders.
binop_names = '''
add mul sub div truediv mod pow
lshift rshift or and
lt le eq ge gt ne
'''.split()
__all__ += binop_names
def construct_multimethods():
    '''Create all of the algebraic multimethods (e.g. mm_add) as module
    globals, plus a defboth_mm_<name> registration helper for each
    binary operation.
    '''
    gbls = globals()
    global mm_unop_base
    # Parent multimethod that every unary-operation multimethod inherits
    # from, so a single default method can cover all unops.
    mm_unop_base = MultiMethod(name='mm_unop_base')
    for name in unop_names:
        mm_name = 'mm_' + name
        gbls[mm_name] = MultiMethod(name=mm_name,
                                    doc='''multimethod for unary operation %s
                                    ''' % (name,),
                                    inherit_from=[mm_unop_base])
    global mm_binop_base
    # Likewise for binary operations.
    mm_binop_base = MultiMethod(name='mm_binop_base')
    for name in binop_names:
        mm_name = 'mm_' + name
        gbls[mm_name] = MultiMethod(name=mm_name,
                                    doc='''multimethod for binary operation %s
                                    ''' % (name,),
                                    inherit_from=[mm_binop_base])
        # e.g. defboth_mm_add registers a method under both argument orders.
        gbls['defboth_mm_' + name] = partial(defboth_wrapper, gbls[mm_name])

# Build the multimethods once at import time, then drop the constructor.
construct_multimethods()
del construct_multimethods
class AlgebraBase(object):
    '''Base class that translates Python algebraic operations (e.g. __add__)
    into multimethod calls (e.g. mm_add)
    '''
    def construct_methods(locs=locals()):
        # Runs once while the class body executes: ``locs`` is the class
        # body's namespace, so assigning into it defines class methods.
        gbls = globals()
        def make_wrapper(name, func):
            meth_name = '__%s__' % name
            func.func_name = meth_name  # Python 2 spelling of __name__
            locs[meth_name] = func
        def make_unop_wrapper(name):
            # e.g. __neg__ -> mm_neg(op)
            mm = gbls['mm_%s' % name]
            make_wrapper(name, lambda op: mm(op))
        def make_binop_wrapper(name):
            # e.g. __add__ -> mm_add(lop, rop)
            mm = gbls['mm_%s' % name]
            make_wrapper(name, lambda lop, rop: mm(lop, rop))
        def make_binrop_wrapper(name):
            # e.g. __radd__ -> mm_add(lop, rop): the reflected dunder
            # receives (self, other) but dispatches in natural order.
            mm = gbls['mm_%s' % name]
            make_wrapper('r'+name, lambda rop, lop: mm(lop, rop))
        for name in unop_names:
            make_unop_wrapper(name)
        for name in binop_names:
            make_binop_wrapper(name)
            make_binrop_wrapper(name)
    construct_methods()
    del construct_methods
# Default multimethod behavior
@defmethod(mm_unop_base, [anytype])
def meth(operand):
    # Unhandled unary operation: defer to Python's normal fallback.
    return NotImplemented

@defmethod(mm_binop_base, [anytype, anytype])
def meth(left, right):
    # Unhandled binary operation: defer to Python's normal fallback.
    return NotImplemented

@defmethod(mm_eq, [AlgebraBase, AlgebraBase])
def meth(left, right):
    # Algebra objects compare equal only on identity by default.
    return left is right

@defmethod(mm_ne, [AlgebraBase, AlgebraBase])
def meth(left, right):
    # Inequality is the negation of the (possibly specialized) equality.
    return not mm_eq(left, right)

@defmethod(mm_sub, [AlgebraBase, scalar_number_type])
def meth(inst, scalar):
    # a - s == a + (-s): reuse the addition rules.
    return mm_add(inst, -scalar)

@defmethod(mm_div, [AlgebraBase, scalar_number_type])
def meth(inst, scalar):
    # a / s == a * (1/s): reuse the multiplication rules.
    return mm_mul(inst, 1.0 / float(scalar))
class DivAlgebraBase(AlgebraBase):
    '''Algebra in which true division simply delegates to ``div``
    (i.e. __truediv__ is semantically equivalent to __div__).
    '''

@defboth_wrapper(mm_truediv, [DivAlgebraBase, anytype])
def meth(numer, denom):
    # Route true division through the ordinary division multimethod.
    return mm_div(numer, denom)
| {
"repo_name": "matthagy/physmath",
"path": "physmath/algebra.py",
"copies": "1",
"size": "3665",
"license": "apache-2.0",
"hash": -4824002952066422000,
"line_mean": 25.1785714286,
"line_max": 95,
"alpha_frac": 0.6054570259,
"autogenerated": false,
"ratio": 3.31374321880651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44192002447065093,
"avg_score": null,
"num_lines": null
} |
'''Algebraic Type System
Supports arbitrary type rules as well as algebraic combinations through
set joins and complements. Designed to extend the Python type system to
incorporate a more robust definition of types. This is especially useful
for multimethods.
Examples:
In[0]: from jamenson.runtime.atypes import *
In[2]: py_numbers = as_type((int, long, float, complex))
In[2]: py_numbers
Out[2]: oneof((int), (long), (float), (complex))
Defines a composite type that matches any of the
basic python number types. The semantics of this type is
OneOf([IsInstanceType(set([int])),
IsInstanceType(set([long])),
IsInstanceType(set([float])),
IsInstanceType(set([complex]))])
This can be simplified with the function `optimize_type`
In[3]: opt_py_numbers = optimize_type(py_numbers)
Out[3]: (int,long,complex,float)
i.e. IsInstanceType(set([int,long,float,complex]))
One can also use type definitions beyond Python types
In[4]: odd_int_t = as_optimized_type(intersection(union(int,long), lambda x: x%2==1))
In[5]: typep(7, odd_int_t)
Out[6]: True
In[7]: typep(6L, odd_int_t)
Out[7]: False
In[8]: typep([1,2], odd_int_t)
Out[8]: False
The last example is important, in that type searches are performed left-to-right
and depth first. In this example, this allows us to assume that the union(int,long)
type has been satisfied and we can safely perform arithmetic without worry of a TypeError.
'''
from __future__ import absolute_import
from functools import partial
from ..func import identity, noop, compose
from ..collections import OrderedDict, OrderedSet
from .bsclasses import (TypeKeyerType, OneOf, TypeBase, KeyerBase, absolute_import,
type_keyer, IsInstanceType, JoinBase, bsclasses_names_spaces)
from .common import worst_score, best_score, no_score
from .common import atypes_multimethods_interface
from ..multimethod import MultiMethod, defmethod, defboth_wrapper, around
from .. import multimethod as MM
# Re-export the shared multimethod interface plus this module's public names.
__all__ = atypes_multimethods_interface + '''
as_type as_string eq_types
IsInstanceType union
optimize_type
typep
'''.split()
# # # # # # #
# Base Type #
# # # # # # #
#import jamenson.runtime.atypes.bsclasses
#bsclasses = jamenson.runtime.atypes.bsclasses
#from jamenson.runtime.atypes.bsclasses import *

# this multimethod extends beyond the scope of atypes
# and is used for generic string representation of all objects
# in the jamenson runtime
as_string = MultiMethod(name='as_string',
                        #signature='object',
                        doc='''string representation of arbitrary systems
                        ''')

# Human-readable name of a type, e.g. for messages.
type_name = MultiMethod(name='type_name',
                        #signature='object',
                        doc='''returns a string the meaningfully names
                        what this type coresponds to
                        ''')

# Semantic (not identity-based) equality between type instances.
eq_types = MultiMethod(name='eq_types',
                       #signature='object,object',
                       doc='''whether two types instances have the same
                       semantical meaning
                       ''')

# Hash consistent with eq_types (semantically equal types hash alike).
hash_type = MultiMethod(name='hash_type',
                        #signature='object',
                        doc='''calculate a hash s.t. two types that are equal through eq_types
                        will have the same hash key
                        ''')

@defmethod(as_string, [TypeBase])
def meth(op):
    # Fallback rendering: just the class name of the type object.
    return op.__class__.__name__

@defmethod(type_name, [TypeBase])
def meth(op):
    # By default the display name is the generic string form.
    return as_string(op)

@defmethod(hash_type, [TypeBase])
def meth(op):
    # Fallback hash is identity based, matching the identity eq_types below.
    return hash(id(op))

# Helper that registers an eq_types method for both argument orders.
defeq = partial(defboth_wrapper, eq_types)

@defeq([TypeBase,TypeBase])
def meth(a,b):
    #return False #`is situation already handled in __eq__
    #rehandle it anyways, when eq_types called directly
    return a is b
# # # # # # # # #
# Type Methods  #
# # # # # # # # #
@defmethod(as_string, [IsInstanceType])
def meth(op):
    # e.g. an isinstance test over {int, float} renders as "(int,float)".
    return '(%s)' % ','.join(tp.__name__ for tp in op.types)

@defeq([IsInstanceType,IsInstanceType])
def meth(a,b):
    # Equal iff they test exactly the same set of classes.
    return a.types == b.types

@defmethod(hash_type, [IsInstanceType])
def meth(op):
    # Order-independent hash over the class set, consistent with eq above.
    return hash(frozenset(op.types))

@defmethod(as_string, [JoinBase])
def meth(op):
    # e.g. "oneof((int), (str))"; `name is the join's display name.
    return '%s(%s)' % (op.name, ', '.join(map(as_string, op.inners)))

@defmethod(eq_types, [JoinBase,JoinBase])
def meth(a,b):
    if a.__class__ is not b.__class__:
        # Different join kinds: let another method (or the default) decide.
        return NotImplemented
    return a.inners == b.inners

@defmethod(hash_type, [JoinBase])
def meth(op):
    # NOTE(review): hashes the ordered tuple of inners while equality
    # compares `inners directly -- assumes equal joins carry their
    # members in a canonical order; confirm.
    return hash(tuple(op.inners))

# # # # # #
# as type #
# # # # # #
as_type = MultiMethod(name='as_type',
                      #signature='object',
                      doc='''convert arbitrary objects to types
                      ''')

# Types pass through unchanged; classes become isinstance tests;
# tuples become a OneOf union of their elements.
defmethod(as_type, [TypeBase])(identity)
defmethod(as_type, [type(type)])(IsInstanceType)
defmethod(as_type, [tuple])(OneOf)
# # # # # # # # # # # # #
# Algebraic Operations  #
# # # # # # # # # # # # #
# these operations are designed to be as simple as possible;
# optimizations and higher level interfaces, built
# upon these simple operations, are provided below
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
union_two = MultiMethod(name='union_two',
                        #signature='TypeBase,TypeBase',
                        doc='''calculates the union of two types
                        ''')

def union(*inners):
    '''Union of all the given types; the result is simplified by
    optimize_type.
    '''
    return reduce(union_two, map(as_type, inners))

# Registers a union_two method for both argument orders.
defunion2 = partial(defboth_wrapper, union_two)

@defunion2([TypeBase,TypeBase])
def meth(a,b):
    # Fallback: wrap the two types in an explicit OneOf union.
    return OneOf(OrderedSet([a,b]))

@defunion2([IsInstanceType,IsInstanceType])
def meth(a,b):
    # Two isinstance tests fuse into one over the union of their classes.
    return IsInstanceType(*(a.types | b.types))

# order of these operations matters, so as to preserve
# order in the join.
# BUG FIX: the two registrations below were plain calls (missing the '@'),
# so the returned decorator was discarded and the methods were never
# attached to union_two; the second one also read the nonexistent
# attribute 'o.inner' instead of 'o.inners'.
@defmethod(union_two, [TypeBase, OneOf])
def meth(a,o):
    return OneOf(OrderedSet([a]) | o.inners)

@defmethod(union_two, [OneOf, TypeBase])
def meth(o,a):
    return OneOf(o.inners | OrderedSet([a]))
# # # # # # # # #
# Optimizations #
# # # # # # # # #
optimize_type = MultiMethod('optimize_type',
                            #signature='TypeBase',
                            doc='''optimize type
                            may return an entirely different type, or a variant of the existing type,
                            or the existing type when no optimization is possible
                            ''')

def as_optimized_type(op):
    # Convenience: coerce `op to a type, then simplify it.
    return optimize_type(as_type(op))

# Default: no optimization known for a generic type; return it unchanged.
defmethod(optimize_type, [TypeBase])(identity)

# # # # # # # # # #
# Join Reduction  #
# # # # # # # # # #
# simplify joins by pairwise reduction of elements in the set.
# each reduction converts a pair of types into a single type.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# define a new method combination for reduction of join elements.
# each method attempts to reduce the two elements to a single type.
# when this is not possible the method should return None.
# we then attempt the next method in this combination until one
# returns a value that is not None or we reach the end of the method
# sequence.
combine_join_reduce = MM.CombinationType('join_reduce')

def compile_join_reduce(mm, method, last_func):
    # Chain `method in front of the previously compiled chain `last_func:
    # try `method first, falling through to `last_func when it returns None.
    func = method.func
    if last_func is None:
        return func
    @MM.wrapper_fixup(mm)
    def wrap(a,b):
        red = func(a,b)
        if red is not None:
            return red
        return last_func(a,b)
    return wrap

# Register the compiler for the new combination type.
MM.combination_compilers[combine_join_reduce] = compile_join_reduce

union_pair_reduce = MultiMethod('union_pair_reduce',
                               doc='''
                               reduce sets of elements in unions by
                               pairwise reduction
                               ''', default_combination=combine_join_reduce)

# Registers a union-pair reducer for both argument orders.
defunionreduce = partial(defboth_wrapper, union_pair_reduce)
def combinate_reduce_join(reducer, op):
    '''Reduce a join through pairwise reduction.

    Tries ``reducer(a, b)`` for every pair of distinct members; on the
    first successful reduction the pair is replaced by the reduced type
    and the process restarts on the smaller join, until a fixed point
    is reached.
    '''
    inners = list(op.inners)
    for i,a in enumerate(inners):
        for b in inners[i+1:]:
            r = reducer(a,b)
            if r is not None:
                new_inners = op.inners.copy()
                #remove before add, in case r is equal to an original member
                new_inners.remove(a)
                new_inners.remove(b)
                #insert at position of the first of the pair
                new_inners.insert(i, r)
                # Recurse on the (strictly smaller) reduced join.
                return combinate_reduce_join(reducer, op.__class__(new_inners))
    return op
@defmethod(optimize_type, [OneOf], combination=around)
def meth(callnext, op):
    # First collapse redundant members pairwise, then let the remaining
    # optimize_type methods run on the reduced union.
    return callnext(combinate_reduce_join(union_pair_reduce, op))

# # # # # # # # # # # # # # # # # # #
# pairwise reduction of join pairs  #
# # # # # # # # # # # # # # # # # # #
@defunionreduce([TypeBase, TypeBase])
def meth(a,b):
    # Semantically equal members are duplicates: keep just one.
    if eq_types(a,b):
        return a
    return None

@defunionreduce([IsInstanceType, IsInstanceType])
def meth(a,b):
    # Two isinstance tests fuse into one over the union of their classes.
    return IsInstanceType(*(a.types | b.types))

# # # # #
# typep #
# # # # #
typep = MultiMethod(name='typep',
                    #signature='object, TypeBase',
                    doc='''test whether an instance corresponds to a type specifiier''')

@defmethod(typep, [object, IsInstanceType])
def meth(op, tp):
    # Plain isinstance test over the class set.
    return isinstance(op, tuple(tp.types))

@defmethod(typep, [object, OneOf])
def meth(op, tp):
    # A union matches if any member matches; the search is left-to-right,
    # matching the ordering guarantee described in the module docstring.
    for etp in tp.inners:
        if typep(op, etp):
            return True
    return False
# # # # # # # # # # # #
# Keyers and Scoring  #
# # # # # # # # # # # #
# A keyer converts an object to a canonical type key
# that can be used to know whether or not an object
# matches a type specification
# # # # # # # # # # # # # # # # # # # # # # # # # # #
get_type_keyer = MultiMethod(name='get_type_keyer',
                             #signature='TypeBase',
                             doc='''returns the object that describes the rule by which instances
                             can be typed in a canonical way to determine if they follow this type
                             rule and the strength (score) of which they follow it
                             ''')

get_key_scorer = MultiMethod(name='get_key_scorer',
                             #signature='TypeBase',
                             doc='''given a type, returns a function that can score a specific key
                             generated from its keyer
                             ''')

# # # # # #
# Keyers  #
# # # # # #
keyer_getfunc = MultiMethod(name='keyer_getfunc',
                            #signature='KeyerBase',
                            doc='''returns a function that can generate a key that is unique
                            with regard to every variant of this type
                            ''')

# The standard type keyer keys an instance by its class.
defmethod(keyer_getfunc, [TypeKeyerType])(lambda x: type)

# isinstance scorer
def score_worst(key): return worst_score  # always matches, weakest rank
def score_none(key): return no_score      # never matches

@defmethod(get_type_keyer, [IsInstanceType])
def meth(op):
    # All isinstance tests share the singleton type keyer.
    return type_keyer

def isinstance_scorer(types, key_type):
    # Score `key_type against the class set: the MRO distance to the
    # closest listed class (0 = exact class match); no_score if none match.
    acc = no_score
    for x in types:
        try:
            score = key_type.mro().index(x)
        except ValueError:
            # x is not in the MRO: this class contributes nothing.
            continue
        except TypeError:
            # key_type.mro() unavailable; fall back to issubclass tests.
            if x is type and issubclass(key_type, x):
                score = best_score
            else:
                score = best_score if issubclass(x, key_type) else no_score
        # NOTE(review): in the TypeError fallback a no_score result still
        # participates in min() below -- assumes no_score compares greater
        # than any real score; confirm against .common.
        if acc is no_score:
            acc = score
        else:
            acc = min(acc, score)
    return acc

@defmethod(get_key_scorer, [IsInstanceType])
def meth(tp):
    # Bind the class set now; the key (an instance's class) arrives later.
    return partial(isinstance_scorer, tp.types)
# algebraic relationships
def score_one_of(inners, keys):
    """Return the best (lowest) score produced by any member scorer.

    Scorers that return ``no_score`` are ignored; if none of them
    matches, ``no_score`` is returned.
    """
    best = no_score
    for scorer, key in zip(inners, keys):
        candidate = scorer(key)
        if candidate is no_score:
            continue
        if best is no_score or candidate < best:
            best = candidate
    return best
@defmethod(get_key_scorer, [OneOf])
def meth(op):
    # Score a union by zipping the per-member scorers against the key parts.
    return partial(score_one_of, map(get_key_scorer, op.inners))

# composition of types
flatten_keyer = MultiMethod('flatten_keyer')        # keyer -> dense index
flatten_type_key = MultiMethod('flatten_type_key')  # type -> index or nested list of indices

@defmethod(flatten_keyer, [OrderedDict, KeyerBase])
def meth(mapping, keyer):
    # Intern `keyer in `mapping, assigning the next dense index on first use.
    try:
        return mapping[keyer]
    except KeyError:
        index = mapping[keyer] = len(mapping)
        return index

@defmethod(flatten_type_key, [OrderedDict, TypeBase])
def meth(mapping, tp):
    # Leaf type: a single index identifying its keyer.
    return flatten_keyer(mapping, get_type_keyer(tp))

@defmethod(flatten_type_key, [OrderedDict, JoinBase])
def meth(mapping, join):
    # Join: a (possibly nested) list of indices, one entry per inner type.
    return map(partial(flatten_type_key, mapping), join.inners)
def compose_types_scorer(tps):
    '''Generate a composite keyer and per-type scorers for a signature.

    Returns ``(keyer, scorers)`` where ``keyer(obj)`` produces a
    canonical key for an instance and ``scorers[i](key)`` scores that
    key against ``tps[i]``.
    '''
    #break cyclic dependency of `compose_types_scorer and `calculate_method
    diff_types = set(map(type, tps))
    if len(diff_types) == 1:
        # FIX: extract the single element directly instead of the old
        # ``for tp in diff_types: pass`` loop-variable leak.
        tp = next(iter(diff_types))
        try:
            keyer,make_scorer = hard_coded_composers[tp]
        except KeyError:
            pass
        else:
            # Homogeneous, hard-coded case: one shared keyer.
            return keyer, map(make_scorer, tps)
    # General case: intern every keyer into a dense index mapping.
    key_mapping = OrderedDict()
    indices = [flatten_type_key(key_mapping, tp) for tp in tps]
    keyers = list(key_mapping)
    if not keyers:
        #happens when everyone is agnostic
        return noop, [score_worst]*len(tps)
    keyer_funcs = map(keyer_getfunc, keyers)
    scorers = map(get_key_scorer, tps)
    assert len(indices) == len(scorers)
    if len(keyers) == 1 and all([isinstance(x,int) for x in indices]):
        # Single flat keyer: the key is that keyer's raw output.
        return list(keyer_funcs)[0], scorers
    # Multiple keyers: the key is a tuple of every keyer's output; each
    # scorer selects its slice of the tuple via make_indexer.
    return (lambda x : tuple([keyer_func(x) for keyer_func in keyer_funcs]),
            [make_indexer(xindices,scorer)
             for xindices,scorer in zip(indices, scorers)])
# Fast path used by compose_types_scorer when every type in a signature
# is a plain IsInstanceType: key on type(obj), score against the class set.
hard_coded_composers = {
    IsInstanceType : [type, lambda tp: partial(isinstance_scorer, tp.types)],
    }
def getanitem(i, scorer, key):
    """Score the single element of *key* at flat position *i*."""
    return scorer(key[i])

def rec_make_tree(indices, key):
    """Rebuild the nested structure described by *indices* out of *key*."""
    if isinstance(indices, int):
        return key[indices]
    return [rec_make_tree(branch, key) for branch in indices]

def getatree(indices, scorer, key):
    """Score the nested selection of *key* described by *indices*."""
    return scorer(rec_make_tree(indices, key))

def make_indexer(indices, scorer):
    """Bind *scorer* to the part of a composite key named by *indices*.

    *indices* is either a flat integer position or an arbitrarily nested
    list of positions; the result is a one-argument callable key -> score.
    """
    if isinstance(indices, int):
        return partial(getanitem, indices, scorer)
    return partial(getatree, indices, scorer)
#apply new methods to classes
#from jamenson.runtime.atypes import bsclasses
def wire():
    # Publish this module's multimethods into the bsclasses namespace so
    # the base classes (TypeBase etc.) can delegate to them.
    for name in 'as_string eq_types hash_type as_type'.split():
        bsclasses_names_spaces[name] = globals()[name]
wire()
del wire

def hack_to_reoptimize_all_method_types():
    # Re-run optimize_type over the signature of every multimethod defined
    # in this module; cold.Type instances are left untouched.
    from jamenson.runtime.atypes.cold import Type
    def opt(x): return x if isinstance(x, Type) else optimize_type(x)
    for k,v in globals().iteritems():
        if isinstance(v, MultiMethod):
            for m in v.methods:
                m.type_sig.types = map(opt, m.type_sig.types)
| {
"repo_name": "matthagy/Jamenson",
"path": "jamenson/runtime/atypes/_atypes.py",
"copies": "1",
"size": "15179",
"license": "apache-2.0",
"hash": -5570458838699171000,
"line_mean": 30.5571725572,
"line_max": 103,
"alpha_frac": 0.6001712893,
"autogenerated": false,
"ratio": 3.574894017899199,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4675065307199199,
"avg_score": null,
"num_lines": null
} |
"""alge URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
# NOTE(review): MEDIA_ROOT and MEDIA_URL are imported but never referenced
# in this module -- confirm nothing relies on the import before removing.
from .settings import MEDIA_ROOT, MEDIA_URL
from django.conf.urls import url, include
from django.contrib import admin

# URL routing table; patterns are tried top to bottom.
urlpatterns = [
    # REST API endpoints.
    url(r'^api/', include('api.urls')),
    # Main site.
    # NOTE(review): this bare '^' include precedes the admin/api-auth/media
    # entries below; those stay reachable only as long as 'strecklista.urls'
    # defines no overlapping patterns -- confirm the ordering is intended.
    url(r'^', include('strecklista.urls')),
    url(r'^admin/', admin.site.urls, name="admin"),
    #Login/logout views for browsable api
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # Password reset
    url(r'^user/password/', include('alge.password_reset')),
    # Media files
    url(r'^media/', include('ProtectedServe.urls')),
]
| {
"repo_name": "teknologkoren/Strequelistan",
"path": "alge/urls.py",
"copies": "1",
"size": "1195",
"license": "mpl-2.0",
"hash": -7276990372810533000,
"line_mean": 33.1428571429,
"line_max": 83,
"alpha_frac": 0.6811715481,
"autogenerated": false,
"ratio": 3.4941520467836256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4675323594883626,
"avg_score": null,
"num_lines": null
} |
alg = 'mf'
nu = 480189
nv = 17770
traindata='~/works/data/netflix_protobuf_train_4by500'
testdata='~/works/data/netflix_protobuf_valid'
#nu = 1000990
#nv = 624961
#traindata='~/works/data/yahoo_protobuf_train_4by500'
#testdata='~/works/data/yahoo_protobuf_valid'
it=10
fly=4
dim=2048
#mf
eta=2.4e-2
lam=4e-2
#dpmf
eps=0.0
tau=0
mineta=2e-13
#admf
eta_reg=2e-2
validdata='~/works/data/netflix_protobuf_valid'
#validdata='~/works/data/yahoo_protobuf_valid'
import os
import sys
for eta in [4e-2]:
for eta_reg in [5e-1]:
for temp in [1e-1]:
for gam in [1.0]:
for dim in [16]:
print './mf --alg %s --train %s --test %s --valid %s --nu %d --nv %d --eta %e --lambda %e --gam %f --result %s --iter %d --dim %d --fly %d --epsilon %f --tau %d --temp %e --mineta %e --eta_reg %e'%(alg,traindata,testdata,validdata,nu,nv,eta,lam,gam,'%s_dim%d'%(alg,dim),it,dim,fly,eps,tau,temp,mineta,eta_reg)
sys.stdout.flush()
os.system('./mf --alg %s --train %s --test %s --valid %s --nu %d --nv %d --eta %e --lambda %e --gam %f --result %s --iter %d --dim %d --fly %d --epsilon %f --tau %d --temp %e --mineta %e --eta_reg %e'%(alg,traindata,testdata,validdata,nu,nv,eta,lam,gam,'%s_dim%d'%(alg,dim),it,dim,fly,eps,tau,temp,mineta,eta_reg))
sys.stdout.flush()
| {
"repo_name": "dmlc/experimental-mf",
"path": "src/run.py",
"copies": "1",
"size": "1303",
"license": "apache-2.0",
"hash": -5188413155825594000,
"line_mean": 30.7804878049,
"line_max": 324,
"alpha_frac": 0.6185725249,
"autogenerated": false,
"ratio": 2.3908256880733947,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8091901624786846,
"avg_score": 0.08349931763730993,
"num_lines": 41
} |
"""AlgoContest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from FirstApp import views as first_app_views

# URL routing table; patterns are matched top to bottom.
urlpatterns = [
    # Page-serving view for the site root.
    url(r'^$', first_app_views.index_page),
    # actions (form submissions / state changes)
    url(r'^forget_action', first_app_views.forget_action),
    url(r'^login_action', first_app_views.login_action),
    url(r'^reset_password_action', first_app_views.reset_password_action),
    url(r'^get_back_mypass_action', first_app_views.get_back_mypass_action),
    url(r'^delete_myteam_action', first_app_views.delete_team_action),
    url(r'^register_action', first_app_views.register_action),
    url(r'^logout_action', first_app_views.logout_action),
    url(r'^modify_action', first_app_views.modify_action),
    # pages
    url(r'^login_page', first_app_views.login_page),
    url(r'^register_page', first_app_views.register_page),
    url(r'^team_info_page', first_app_views.team_info_page),
    url(r'^info_page', first_app_views.info_page),
    url(r'^myteam_page', first_app_views.my_team_page),
    url(r'^changepass_page', first_app_views.change_pass_page),
    url(r'^forget_page', first_app_views.forget_page),
    url(r'^change_team_info_page', first_app_views.change_team_info_page),
    url(r'^create_team_info_page', first_app_views.create_team_info_page),
    url(r'^get_captcha_code', first_app_views.get_captcha_code),
    url(r'^validate_captcha', first_app_views.validate_captcha),
    url(r'^download_info', first_app_views.download_info),
    url(r'^school_info', first_app_views.school_info),
    url(r'^excel_export', first_app_views.excel_export),
    url(r'^findpass', first_app_views.get_back_mypass_page),
    url(r'^admin/', admin.site.urls),
    # Fallbacks: anything unmatched above renders the "forbid" view.
    # NOTE(review): the '^404' entry is subsumed by the catch-all '^'
    # directly below it -- confirm whether a distinct 404 page was intended.
    url(r'^404', first_app_views.forbid),
    url(r'^', first_app_views.forbid),
]
from AlgoContest import settings
import os
from django.conf.urls.static import static

# In DEBUG mode serve files under <BASE_DIR>/static at the /static/ URL
# prefix; in production a real web server is expected to serve these.
if settings.DEBUG:
    media_root = os.path.join(settings.BASE_DIR, 'static')
    urlpatterns += static('/static/', document_root=media_root)
| {
"repo_name": "xudianc/AlgoContest",
"path": "AlgoContest/urls.py",
"copies": "1",
"size": "2654",
"license": "mit",
"hash": -4128191568891630600,
"line_mean": 41.6129032258,
"line_max": 79,
"alpha_frac": 0.6949280848,
"autogenerated": false,
"ratio": 3.10093896713615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42958670519361497,
"avg_score": null,
"num_lines": null
} |
"""Algolia is a funny word."""
from algoliasearch import algoliasearch
class AlgoliaHelper(object):
"""Handles the Algolia stuff."""
def __init__(self):
self.app_id = 'PBF4ZR3KBT'
self.api_key = '9188cd13a0dbf3d0af949802b0e31489' # search-only
self.index_name = 'ubuntu_irc_logs'
# Restricted search (for acquiring metadata).
self.restricted_search = {
'page': 1,
'hitsPerPage': 1,
'length': 1
}
# General search.
self.general_search = {
'highlightPreTag': '',
'highlightPostTag': '',
'hitsPerPage': 1000
}
# Keep it simple.
self.client = algoliasearch.Client(self.app_id, self.api_key)
self.index = self.client.init_index(self.index_name)
def get_channels(self):
"""Get a list of indexed channels."""
criteria = {'facets': 'channel'}
results = self.index.search(
'',
dict(self.restricted_search, **criteria)
)
return results['facets']['channel'].keys()
def do_a_search(self, query, criteria):
"""Execute a generic search based on some criteria."""
results = self.index.search(
query,
dict(self.general_search, **criteria)
)
return results
def get_irc_logs(self, timestamp, channel):
"""Search for IRC logs given a datestamp and criteria."""
# Because we're only interested in exact timestamp hits, we need the
# ranking info in order to filter out inexact results.
criteria = {
'facetFilters': ['channel:' + channel],
'getRankingInfo': 1
}
results = self.index.search(
timestamp,
dict(self.general_search, **criteria)
)
returnable = []
for hit in results['hits']:
if hit['_rankingInfo']['proximityDistance'] <= 3:
returnable.append('[' + hit['datestamp'] + '] <' + \
hit['username'] + '> ' + hit['message'])
return returnable
def get_most_recent_user_stamp(self, username):
"""Get the timestamp of the most recent log line for a user."""
criteria = {
'facetFilters': ['username:' + username],
'getRankingInfo': 1
}
results = self.index.search(
'',
dict(self.general_search, **criteria)
)
# The datestamp field is ordered ascending, but we want the last one,
# so we have to get the entire result list then pick the final item.
returnable = results['hits'][(results['nbHits'] - 1)]['datestamp']
return returnable
def get_userinfo(self, username):
    """Summarise a user: channels seen in, message count, first sighting."""
    criteria = {
        'getRankingInfo': 1,
        'facets': '*',
    }
    results = self.index.search(
        username,
        dict(self.restricted_search, **criteria)
    )
    # hitsPerPage is 1 in the restricted preset, so hits[0] is the only
    # (and, datestamps being ascending, earliest) line returned.
    return {
        'channels': results['facets']['channel'].keys(),
        'messages': results['nbHits'],
        'firstseen': results['hits'][0]['datestamp'],
    }
| {
"repo_name": "phrawzty/ubunolia",
"path": "algoliahelper/algoliahelper.py",
"copies": "1",
"size": "3275",
"license": "mpl-2.0",
"hash": -2960955746323192300,
"line_mean": 29.3240740741,
"line_max": 77,
"alpha_frac": 0.546870229,
"autogenerated": false,
"ratio": 4.166666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5213536895666667,
"avg_score": null,
"num_lines": null
} |
# ANSI rendering of the pygments "algol" style.  Only the 16 base
# 256-color codes are listed; the BACKGROUND_/BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants are derived mechanically, and sorting
# reproduces the original (alphabetical) key order exactly.
ALGOL_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '59', 'BLUE': '59', 'CYAN': '59', 'GREEN': '59',
            'PURPLE': '59', 'RED': '09', 'WHITE': '102', 'YELLOW': '09',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '102',
            'INTENSE_CYAN': '102', 'INTENSE_GREEN': '102',
            'INTENSE_PURPLE': '102', 'INTENSE_RED': '09',
            'INTENSE_WHITE': '102', 'INTENSE_YELLOW': '102',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "algol_nu" style (same palette as
# "algol").  Only the 16 base codes are listed; the variant entries are
# derived, and sorting reproduces the original key order exactly.
ALGOL_NU_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '59', 'BLUE': '59', 'CYAN': '59', 'GREEN': '59',
            'PURPLE': '59', 'RED': '09', 'WHITE': '102', 'YELLOW': '09',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '102',
            'INTENSE_CYAN': '102', 'INTENSE_GREEN': '102',
            'INTENSE_PURPLE': '102', 'INTENSE_RED': '09',
            'INTENSE_WHITE': '102', 'INTENSE_YELLOW': '102',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "autumn" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
AUTUMN_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '18', 'BLUE': '19', 'CYAN': '37', 'GREEN': '34',
            'PURPLE': '90', 'RED': '124', 'WHITE': '145', 'YELLOW': '130',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '33',
            'INTENSE_CYAN': '33', 'INTENSE_GREEN': '64',
            'INTENSE_PURPLE': '217', 'INTENSE_RED': '130',
            'INTENSE_WHITE': '145', 'INTENSE_YELLOW': '217',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "borland" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
BORLAND_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '16', 'BLUE': '18', 'CYAN': '30', 'GREEN': '28',
            'PURPLE': '90', 'RED': '124', 'WHITE': '145', 'YELLOW': '124',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '21',
            'INTENSE_CYAN': '194', 'INTENSE_GREEN': '102',
            'INTENSE_PURPLE': '188', 'INTENSE_RED': '09',
            'INTENSE_WHITE': '224', 'INTENSE_YELLOW': '188',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "bw" style: every color token maps to
# the same base code ('09'); only the SGR attribute prefixes differ.
# Sorting reproduces the original key order exactly.
BW_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + '09')
        for name in (
            'BLACK', 'BLUE', 'CYAN', 'GREEN', 'PURPLE', 'RED', 'WHITE',
            'YELLOW', 'INTENSE_BLACK', 'INTENSE_BLUE', 'INTENSE_CYAN',
            'INTENSE_GREEN', 'INTENSE_PURPLE', 'INTENSE_RED',
            'INTENSE_WHITE', 'INTENSE_YELLOW',
        )
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "colorful" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
COLORFUL_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '16', 'BLUE': '20', 'CYAN': '31', 'GREEN': '34',
            'PURPLE': '90', 'RED': '124', 'WHITE': '145', 'YELLOW': '130',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '61',
            'INTENSE_CYAN': '145', 'INTENSE_GREEN': '102',
            'INTENSE_PURPLE': '217', 'INTENSE_RED': '166',
            'INTENSE_WHITE': '15', 'INTENSE_YELLOW': '217',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "emacs" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
EMACS_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '28', 'BLUE': '18', 'CYAN': '26', 'GREEN': '34',
            'PURPLE': '90', 'RED': '124', 'WHITE': '145', 'YELLOW': '130',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '26',
            'INTENSE_CYAN': '145', 'INTENSE_GREEN': '34',
            'INTENSE_PURPLE': '129', 'INTENSE_RED': '167',
            'INTENSE_WHITE': '145', 'INTENSE_YELLOW': '145',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "friendly" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
FRIENDLY_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '22', 'BLUE': '18', 'CYAN': '31', 'GREEN': '34',
            'PURPLE': '90', 'RED': '124', 'WHITE': '145', 'YELLOW': '166',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '74',
            'INTENSE_CYAN': '74', 'INTENSE_GREEN': '71',
            'INTENSE_PURPLE': '134', 'INTENSE_RED': '167',
            'INTENSE_WHITE': '15', 'INTENSE_YELLOW': '145',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "fruity" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
FRUITY_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '16', 'BLUE': '32', 'CYAN': '32', 'GREEN': '28',
            'PURPLE': '198', 'RED': '09', 'WHITE': '187', 'YELLOW': '202',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '33',
            'INTENSE_CYAN': '33', 'INTENSE_GREEN': '102',
            'INTENSE_PURPLE': '198', 'INTENSE_RED': '202',
            'INTENSE_WHITE': '15', 'INTENSE_YELLOW': '187',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "igor" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
IGOR_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '34', 'BLUE': '21', 'CYAN': '30', 'GREEN': '34',
            'PURPLE': '163', 'RED': '166', 'WHITE': '163', 'YELLOW': '166',
            'INTENSE_BLACK': '30', 'INTENSE_BLUE': '21',
            'INTENSE_CYAN': '30', 'INTENSE_GREEN': '34',
            'INTENSE_PURPLE': '163', 'INTENSE_RED': '166',
            'INTENSE_WHITE': '163', 'INTENSE_YELLOW': '166',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "lovelace" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
LOVELACE_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '59', 'BLUE': '25', 'CYAN': '29', 'GREEN': '65',
            'PURPLE': '133', 'RED': '124', 'WHITE': '102', 'YELLOW': '130',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '25',
            'INTENSE_CYAN': '102', 'INTENSE_GREEN': '29',
            'INTENSE_PURPLE': '133', 'INTENSE_RED': '131',
            'INTENSE_WHITE': '102', 'INTENSE_YELLOW': '136',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# ANSI rendering of the pygments "manni" style.  Only the 16 base
# codes are listed; the variant entries are derived, and sorting
# reproduces the original key order exactly.
MANNI_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        (prefix + name, sgr + code)
        for name, code in {
            'BLACK': '16', 'BLUE': '18', 'CYAN': '30', 'GREEN': '40',
            'PURPLE': '165', 'RED': '124', 'WHITE': '145', 'YELLOW': '166',
            'INTENSE_BLACK': '59', 'INTENSE_BLUE': '105',
            'INTENSE_CYAN': '45', 'INTENSE_GREEN': '113',
            'INTENSE_PURPLE': '165', 'INTENSE_RED': '202',
            'INTENSE_WHITE': '224', 'INTENSE_YELLOW': '221',
        }.items()
        for prefix, sgr in (
            ('', '38;5;'), ('BACKGROUND_', '48;5;'), ('BOLD_', '1;38;5;'),
            ('UNDERLINE_', '4;38;5;'), ('BOLD_UNDERLINE_', '1;4;38;5;'),
        )
    ]
))
# MONOKAI_STYLE: ANSI SGR escape fragments (256-color codes) for the "monokai"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# Keys are emitted in sorted order to match the neighboring style tables.
MONOKAI_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;16',
            'BLUE': '38;5;16',
            'CYAN': '38;5;81',
            'GREEN': '38;5;95',
            'INTENSE_BLACK': '38;5;95',
            'INTENSE_BLUE': '38;5;141',
            'INTENSE_CYAN': '38;5;81',
            'INTENSE_GREEN': '38;5;148',
            'INTENSE_PURPLE': '38;5;141',
            'INTENSE_RED': '38;5;197',
            'INTENSE_WHITE': '38;5;15',
            'INTENSE_YELLOW': '38;5;186',
            'PURPLE': '38;5;89',
            'RED': '38;5;89',
            'WHITE': '38;5;186',
            'YELLOW': '38;5;95',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# MURPHY_STYLE: ANSI SGR escape fragments (256-color codes) for the "murphy"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# Keys are emitted in sorted order to match the neighboring style tables.
MURPHY_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;16',
            'BLUE': '38;5;18',
            'CYAN': '38;5;31',
            'GREEN': '38;5;34',
            'INTENSE_BLACK': '38;5;59',
            'INTENSE_BLUE': '38;5;63',
            'INTENSE_CYAN': '38;5;86',
            'INTENSE_GREEN': '38;5;86',
            'INTENSE_PURPLE': '38;5;213',
            'INTENSE_RED': '38;5;209',
            'INTENSE_WHITE': '38;5;15',
            'INTENSE_YELLOW': '38;5;222',
            'PURPLE': '38;5;90',
            'RED': '38;5;124',
            'WHITE': '38;5;145',
            'YELLOW': '38;5;166',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# NATIVE_STYLE: ANSI SGR escape fragments (256-color codes) for the "native"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# Keys are emitted in sorted order to match the neighboring style tables.
NATIVE_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;52',
            'BLUE': '38;5;67',
            'CYAN': '38;5;31',
            'GREEN': '38;5;64',
            'INTENSE_BLACK': '38;5;59',
            'INTENSE_BLUE': '38;5;68',
            'INTENSE_CYAN': '38;5;87',
            'INTENSE_GREEN': '38;5;70',
            'INTENSE_PURPLE': '38;5;188',
            'INTENSE_RED': '38;5;160',
            'INTENSE_WHITE': '38;5;15',
            'INTENSE_YELLOW': '38;5;214',
            'PURPLE': '38;5;59',
            'RED': '38;5;124',
            'WHITE': '38;5;145',
            'YELLOW': '38;5;124',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# PARAISO_DARK_STYLE: ANSI SGR escape fragments (256-color codes) for the
# "paraiso-dark" color style (presumably derived from the Pygments style of
# the same name).  Only the 16 base foreground colors are spelled out;
# BOLD_/UNDERLINE_/BOLD_UNDERLINE_ variants prepend the SGR attributes
# "1;"/"4;"/"1;4;" and BACKGROUND_ swaps foreground selector 38 for
# background selector 48.  Keys are emitted in sorted order to match the
# neighboring style tables.
PARAISO_DARK_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;95',
            'BLUE': '38;5;97',
            'CYAN': '38;5;39',
            'GREEN': '38;5;72',
            'INTENSE_BLACK': '38;5;95',
            'INTENSE_BLUE': '38;5;97',
            'INTENSE_CYAN': '38;5;79',
            'INTENSE_GREEN': '38;5;72',
            'INTENSE_PURPLE': '38;5;188',
            'INTENSE_RED': '38;5;203',
            'INTENSE_WHITE': '38;5;188',
            'INTENSE_YELLOW': '38;5;220',
            'PURPLE': '38;5;97',
            'RED': '38;5;203',
            'WHITE': '38;5;79',
            'YELLOW': '38;5;214',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# PARAISO_LIGHT_STYLE: ANSI SGR escape fragments (256-color codes) for the
# "paraiso-light" color style (presumably derived from the Pygments style of
# the same name).  Only the 16 base foreground colors are spelled out;
# BOLD_/UNDERLINE_/BOLD_UNDERLINE_ variants prepend the SGR attributes
# "1;"/"4;"/"1;4;" and BACKGROUND_ swaps foreground selector 38 for
# background selector 48.  Keys are emitted in sorted order to match the
# neighboring style tables.
PARAISO_LIGHT_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;16',
            'BLUE': '38;5;16',
            'CYAN': '38;5;39',
            'GREEN': '38;5;72',
            'INTENSE_BLACK': '38;5;16',
            'INTENSE_BLUE': '38;5;97',
            'INTENSE_CYAN': '38;5;79',
            'INTENSE_GREEN': '38;5;72',
            'INTENSE_PURPLE': '38;5;97',
            'INTENSE_RED': '38;5;203',
            'INTENSE_WHITE': '38;5;79',
            'INTENSE_YELLOW': '38;5;220',
            'PURPLE': '38;5;97',
            'RED': '38;5;16',
            'WHITE': '38;5;102',
            'YELLOW': '38;5;214',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# PASTIE_STYLE: ANSI SGR escape fragments (256-color codes) for the "pastie"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# Keys are emitted in sorted order to match the neighboring style tables.
PASTIE_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;16',
            'BLUE': '38;5;20',
            'CYAN': '38;5;25',
            'GREEN': '38;5;28',
            'INTENSE_BLACK': '38;5;59',
            'INTENSE_BLUE': '38;5;61',
            'INTENSE_CYAN': '38;5;194',
            'INTENSE_GREEN': '38;5;34',
            'INTENSE_PURPLE': '38;5;188',
            'INTENSE_RED': '38;5;172',
            'INTENSE_WHITE': '38;5;15',
            'INTENSE_YELLOW': '38;5;188',
            'PURPLE': '38;5;125',
            'RED': '38;5;124',
            'WHITE': '38;5;145',
            'YELLOW': '38;5;130',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# PERLDOC_STYLE: ANSI SGR escape fragments (256-color codes) for the
# "perldoc" color style (presumably derived from the Pygments style of the
# same name).  Only the 16 base foreground colors are spelled out;
# BOLD_/UNDERLINE_/BOLD_UNDERLINE_ variants prepend the SGR attributes
# "1;"/"4;"/"1;4;" and BACKGROUND_ swaps foreground selector 38 for
# background selector 48.  Keys are emitted in sorted order to match the
# neighboring style tables.
PERLDOC_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;18',
            'BLUE': '38;5;18',
            'CYAN': '38;5;31',
            'GREEN': '38;5;34',
            'INTENSE_BLACK': '38;5;59',
            'INTENSE_BLUE': '38;5;134',
            'INTENSE_CYAN': '38;5;145',
            'INTENSE_GREEN': '38;5;28',
            'INTENSE_PURPLE': '38;5;134',
            'INTENSE_RED': '38;5;167',
            'INTENSE_WHITE': '38;5;188',
            'INTENSE_YELLOW': '38;5;188',
            'PURPLE': '38;5;90',
            'RED': '38;5;124',
            'WHITE': '38;5;145',
            'YELLOW': '38;5;166',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# RRT_STYLE: ANSI SGR escape fragments (256-color codes) for the "rrt"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# NOTE: the '09' codes (leading zero) are preserved exactly as generated.
# Keys are emitted in sorted order to match the neighboring style tables.
RRT_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;09',
            'BLUE': '38;5;117',
            'CYAN': '38;5;117',
            'GREEN': '38;5;46',
            'INTENSE_BLACK': '38;5;117',
            'INTENSE_BLUE': '38;5;117',
            'INTENSE_CYAN': '38;5;122',
            'INTENSE_GREEN': '38;5;46',
            'INTENSE_PURPLE': '38;5;213',
            'INTENSE_RED': '38;5;09',
            'INTENSE_WHITE': '38;5;188',
            'INTENSE_YELLOW': '38;5;222',
            'PURPLE': '38;5;213',
            'RED': '38;5;09',
            'WHITE': '38;5;117',
            'YELLOW': '38;5;09',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# TANGO_STYLE: ANSI SGR escape fragments (256-color codes) for the "tango"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# NOTE: the '09' code (leading zero) is preserved exactly as generated.
# Keys are emitted in sorted order to match the neighboring style tables.
TANGO_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;16',
            'BLUE': '38;5;20',
            'CYAN': '38;5;61',
            'GREEN': '38;5;34',
            'INTENSE_BLACK': '38;5;24',
            'INTENSE_BLUE': '38;5;62',
            'INTENSE_CYAN': '38;5;15',
            'INTENSE_GREEN': '38;5;64',
            'INTENSE_PURPLE': '38;5;15',
            'INTENSE_RED': '38;5;09',
            'INTENSE_WHITE': '38;5;15',
            'INTENSE_YELLOW': '38;5;178',
            'PURPLE': '38;5;90',
            'RED': '38;5;124',
            'WHITE': '38;5;15',
            'YELLOW': '38;5;94',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# TRAC_STYLE: ANSI SGR escape fragments (256-color codes) for the "trac"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# Keys are emitted in sorted order to match the neighboring style tables.
TRAC_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;16',
            'BLUE': '38;5;18',
            'CYAN': '38;5;30',
            'GREEN': '38;5;100',
            'INTENSE_BLACK': '38;5;59',
            'INTENSE_BLUE': '38;5;60',
            'INTENSE_CYAN': '38;5;194',
            'INTENSE_GREEN': '38;5;102',
            'INTENSE_PURPLE': '38;5;188',
            'INTENSE_RED': '38;5;137',
            'INTENSE_WHITE': '38;5;224',
            'INTENSE_YELLOW': '38;5;188',
            'PURPLE': '38;5;90',
            'RED': '38;5;124',
            'WHITE': '38;5;145',
            'YELLOW': '38;5;100',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# VIM_STYLE: ANSI SGR escape fragments (256-color codes) for the "vim"
# color style (presumably derived from the Pygments style of the same name).
# Only the 16 base foreground colors are spelled out; BOLD_/UNDERLINE_/
# BOLD_UNDERLINE_ variants prepend the SGR attributes "1;"/"4;"/"1;4;" and
# BACKGROUND_ swaps foreground selector 38 for background selector 48.
# NOTE: the '09' code (leading zero) is preserved exactly as generated.
# Keys are emitted in sorted order to match the neighboring style tables.
VIM_STYLE = dict(sorted(
    [('NO_COLOR', '0')] + [
        pair
        for color, code in {
            'BLACK': '38;5;18',
            'BLUE': '38;5;18',
            'CYAN': '38;5;44',
            'GREEN': '38;5;40',
            'INTENSE_BLACK': '38;5;60',
            'INTENSE_BLUE': '38;5;68',
            'INTENSE_CYAN': '38;5;44',
            'INTENSE_GREEN': '38;5;40',
            'INTENSE_PURPLE': '38;5;164',
            'INTENSE_RED': '38;5;09',
            'INTENSE_WHITE': '38;5;188',
            'INTENSE_YELLOW': '38;5;184',
            'PURPLE': '38;5;164',
            'RED': '38;5;160',
            'WHITE': '38;5;188',
            'YELLOW': '38;5;160',
        }.items()
        for pair in (
            (color, code),
            ('BOLD_' + color, '1;' + code),
            ('UNDERLINE_' + color, '4;' + code),
            ('BOLD_UNDERLINE_' + color, '1;4;' + code),
            ('BACKGROUND_' + color, '48' + code[2:]),
        )
    ]
))
# ANSI escape-code map for the Pygments "vs" color scheme.
# Keys are color token names; values are SGR parameter strings:
# '38;5;N' = 256-color foreground, '48;5;N' = background, with optional
# '1;' (bold) and '4;' (underline) prefixes.  'NO_COLOR' resets attributes.
VS_STYLE = {
    'BACKGROUND_BLACK': '48;5;28',
    'BACKGROUND_BLUE': '48;5;21',
    'BACKGROUND_CYAN': '48;5;31',
    'BACKGROUND_GREEN': '48;5;28',
    'BACKGROUND_INTENSE_BLACK': '48;5;31',
    'BACKGROUND_INTENSE_BLUE': '48;5;31',
    'BACKGROUND_INTENSE_CYAN': '48;5;31',
    'BACKGROUND_INTENSE_GREEN': '48;5;31',
    'BACKGROUND_INTENSE_PURPLE': '48;5;31',
    'BACKGROUND_INTENSE_RED': '48;5;09',
    'BACKGROUND_INTENSE_WHITE': '48;5;31',
    'BACKGROUND_INTENSE_YELLOW': '48;5;31',
    'BACKGROUND_PURPLE': '48;5;124',
    'BACKGROUND_RED': '48;5;124',
    'BACKGROUND_WHITE': '48;5;31',
    'BACKGROUND_YELLOW': '48;5;124',
    'BLACK': '38;5;28',
    'BLUE': '38;5;21',
    'BOLD_BLACK': '1;38;5;28',
    'BOLD_BLUE': '1;38;5;21',
    'BOLD_CYAN': '1;38;5;31',
    'BOLD_GREEN': '1;38;5;28',
    'BOLD_INTENSE_BLACK': '1;38;5;31',
    'BOLD_INTENSE_BLUE': '1;38;5;31',
    'BOLD_INTENSE_CYAN': '1;38;5;31',
    'BOLD_INTENSE_GREEN': '1;38;5;31',
    'BOLD_INTENSE_PURPLE': '1;38;5;31',
    'BOLD_INTENSE_RED': '1;38;5;09',
    'BOLD_INTENSE_WHITE': '1;38;5;31',
    'BOLD_INTENSE_YELLOW': '1;38;5;31',
    'BOLD_PURPLE': '1;38;5;124',
    'BOLD_RED': '1;38;5;124',
    'BOLD_UNDERLINE_BLACK': '1;4;38;5;28',
    'BOLD_UNDERLINE_BLUE': '1;4;38;5;21',
    'BOLD_UNDERLINE_CYAN': '1;4;38;5;31',
    'BOLD_UNDERLINE_GREEN': '1;4;38;5;28',
    'BOLD_UNDERLINE_INTENSE_BLACK': '1;4;38;5;31',
    'BOLD_UNDERLINE_INTENSE_BLUE': '1;4;38;5;31',
    'BOLD_UNDERLINE_INTENSE_CYAN': '1;4;38;5;31',
    'BOLD_UNDERLINE_INTENSE_GREEN': '1;4;38;5;31',
    'BOLD_UNDERLINE_INTENSE_PURPLE': '1;4;38;5;31',
    'BOLD_UNDERLINE_INTENSE_RED': '1;4;38;5;09',
    'BOLD_UNDERLINE_INTENSE_WHITE': '1;4;38;5;31',
    'BOLD_UNDERLINE_INTENSE_YELLOW': '1;4;38;5;31',
    'BOLD_UNDERLINE_PURPLE': '1;4;38;5;124',
    'BOLD_UNDERLINE_RED': '1;4;38;5;124',
    'BOLD_UNDERLINE_WHITE': '1;4;38;5;31',
    'BOLD_UNDERLINE_YELLOW': '1;4;38;5;124',
    'BOLD_WHITE': '1;38;5;31',
    'BOLD_YELLOW': '1;38;5;124',
    'CYAN': '38;5;31',
    'GREEN': '38;5;28',
    'INTENSE_BLACK': '38;5;31',
    'INTENSE_BLUE': '38;5;31',
    'INTENSE_CYAN': '38;5;31',
    'INTENSE_GREEN': '38;5;31',
    'INTENSE_PURPLE': '38;5;31',
    'INTENSE_RED': '38;5;09',
    'INTENSE_WHITE': '38;5;31',
    'INTENSE_YELLOW': '38;5;31',
    'NO_COLOR': '0',
    'PURPLE': '38;5;124',
    'RED': '38;5;124',
    'UNDERLINE_BLACK': '4;38;5;28',
    'UNDERLINE_BLUE': '4;38;5;21',
    'UNDERLINE_CYAN': '4;38;5;31',
    'UNDERLINE_GREEN': '4;38;5;28',
    'UNDERLINE_INTENSE_BLACK': '4;38;5;31',
    'UNDERLINE_INTENSE_BLUE': '4;38;5;31',
    'UNDERLINE_INTENSE_CYAN': '4;38;5;31',
    'UNDERLINE_INTENSE_GREEN': '4;38;5;31',
    'UNDERLINE_INTENSE_PURPLE': '4;38;5;31',
    'UNDERLINE_INTENSE_RED': '4;38;5;09',
    'UNDERLINE_INTENSE_WHITE': '4;38;5;31',
    'UNDERLINE_INTENSE_YELLOW': '4;38;5;31',
    'UNDERLINE_PURPLE': '4;38;5;124',
    'UNDERLINE_RED': '4;38;5;124',
    'UNDERLINE_WHITE': '4;38;5;31',
    'UNDERLINE_YELLOW': '4;38;5;124',
    'WHITE': '38;5;31',
    'YELLOW': '38;5;124',
}
# ANSI escape-code map for the Pygments "xcode" color scheme.
# Same value format as the other *_STYLE tables in this module
# ('38;5;N' foreground / '48;5;N' background, '1;' bold, '4;' underline).
XCODE_STYLE = {
    'BACKGROUND_BLACK': '48;5;16',
    'BACKGROUND_BLUE': '48;5;20',
    'BACKGROUND_CYAN': '48;5;60',
    'BACKGROUND_GREEN': '48;5;28',
    'BACKGROUND_INTENSE_BLACK': '48;5;60',
    'BACKGROUND_INTENSE_BLUE': '48;5;20',
    'BACKGROUND_INTENSE_CYAN': '48;5;60',
    'BACKGROUND_INTENSE_GREEN': '48;5;60',
    'BACKGROUND_INTENSE_PURPLE': '48;5;126',
    'BACKGROUND_INTENSE_RED': '48;5;160',
    'BACKGROUND_INTENSE_WHITE': '48;5;60',
    'BACKGROUND_INTENSE_YELLOW': '48;5;94',
    'BACKGROUND_PURPLE': '48;5;126',
    'BACKGROUND_RED': '48;5;160',
    'BACKGROUND_WHITE': '48;5;60',
    'BACKGROUND_YELLOW': '48;5;94',
    'BLACK': '38;5;16',
    'BLUE': '38;5;20',
    'BOLD_BLACK': '1;38;5;16',
    'BOLD_BLUE': '1;38;5;20',
    'BOLD_CYAN': '1;38;5;60',
    'BOLD_GREEN': '1;38;5;28',
    'BOLD_INTENSE_BLACK': '1;38;5;60',
    'BOLD_INTENSE_BLUE': '1;38;5;20',
    'BOLD_INTENSE_CYAN': '1;38;5;60',
    'BOLD_INTENSE_GREEN': '1;38;5;60',
    'BOLD_INTENSE_PURPLE': '1;38;5;126',
    'BOLD_INTENSE_RED': '1;38;5;160',
    'BOLD_INTENSE_WHITE': '1;38;5;60',
    'BOLD_INTENSE_YELLOW': '1;38;5;94',
    'BOLD_PURPLE': '1;38;5;126',
    'BOLD_RED': '1;38;5;160',
    'BOLD_UNDERLINE_BLACK': '1;4;38;5;16',
    'BOLD_UNDERLINE_BLUE': '1;4;38;5;20',
    'BOLD_UNDERLINE_CYAN': '1;4;38;5;60',
    'BOLD_UNDERLINE_GREEN': '1;4;38;5;28',
    'BOLD_UNDERLINE_INTENSE_BLACK': '1;4;38;5;60',
    'BOLD_UNDERLINE_INTENSE_BLUE': '1;4;38;5;20',
    'BOLD_UNDERLINE_INTENSE_CYAN': '1;4;38;5;60',
    'BOLD_UNDERLINE_INTENSE_GREEN': '1;4;38;5;60',
    'BOLD_UNDERLINE_INTENSE_PURPLE': '1;4;38;5;126',
    'BOLD_UNDERLINE_INTENSE_RED': '1;4;38;5;160',
    'BOLD_UNDERLINE_INTENSE_WHITE': '1;4;38;5;60',
    'BOLD_UNDERLINE_INTENSE_YELLOW': '1;4;38;5;94',
    'BOLD_UNDERLINE_PURPLE': '1;4;38;5;126',
    'BOLD_UNDERLINE_RED': '1;4;38;5;160',
    'BOLD_UNDERLINE_WHITE': '1;4;38;5;60',
    'BOLD_UNDERLINE_YELLOW': '1;4;38;5;94',
    'BOLD_WHITE': '1;38;5;60',
    'BOLD_YELLOW': '1;38;5;94',
    'CYAN': '38;5;60',
    'GREEN': '38;5;28',
    'INTENSE_BLACK': '38;5;60',
    'INTENSE_BLUE': '38;5;20',
    'INTENSE_CYAN': '38;5;60',
    'INTENSE_GREEN': '38;5;60',
    'INTENSE_PURPLE': '38;5;126',
    'INTENSE_RED': '38;5;160',
    'INTENSE_WHITE': '38;5;60',
    'INTENSE_YELLOW': '38;5;94',
    'NO_COLOR': '0',
    'PURPLE': '38;5;126',
    'RED': '38;5;160',
    'UNDERLINE_BLACK': '4;38;5;16',
    'UNDERLINE_BLUE': '4;38;5;20',
    'UNDERLINE_CYAN': '4;38;5;60',
    'UNDERLINE_GREEN': '4;38;5;28',
    'UNDERLINE_INTENSE_BLACK': '4;38;5;60',
    'UNDERLINE_INTENSE_BLUE': '4;38;5;20',
    'UNDERLINE_INTENSE_CYAN': '4;38;5;60',
    'UNDERLINE_INTENSE_GREEN': '4;38;5;60',
    'UNDERLINE_INTENSE_PURPLE': '4;38;5;126',
    'UNDERLINE_INTENSE_RED': '4;38;5;160',
    'UNDERLINE_INTENSE_WHITE': '4;38;5;60',
    'UNDERLINE_INTENSE_YELLOW': '4;38;5;94',
    'UNDERLINE_PURPLE': '4;38;5;126',
    'UNDERLINE_RED': '4;38;5;160',
    'UNDERLINE_WHITE': '4;38;5;60',
    'UNDERLINE_YELLOW': '4;38;5;94',
    'WHITE': '38;5;60',
    'YELLOW': '38;5;94',
}
# Registry mapping Pygments style names to their ANSI color tables
# (the *_STYLE dicts defined earlier in this module).
STYLES = {
    'algol': ALGOL_STYLE,
    'algol_nu': ALGOL_NU_STYLE,
    'autumn': AUTUMN_STYLE,
    'borland': BORLAND_STYLE,
    'bw': BW_STYLE,
    'colorful': COLORFUL_STYLE,
    'default': DEFAULT_STYLE,
    'emacs': EMACS_STYLE,
    'friendly': FRIENDLY_STYLE,
    'fruity': FRUITY_STYLE,
    'igor': IGOR_STYLE,
    'lovelace': LOVELACE_STYLE,
    'manni': MANNI_STYLE,
    'monokai': MONOKAI_STYLE,
    'murphy': MURPHY_STYLE,
    'native': NATIVE_STYLE,
    'paraiso-dark': PARAISO_DARK_STYLE,
    'paraiso-light': PARAISO_LIGHT_STYLE,
    'pastie': PASTIE_STYLE,
    'perldoc': PERLDOC_STYLE,
    'rrt': RRT_STYLE,
    'tango': TANGO_STYLE,
    'trac': TRAC_STYLE,
    'vim': VIM_STYLE,
    'vs': VS_STYLE,
    'xcode': XCODE_STYLE,
}
| {
"repo_name": "scopatz/xolors",
"path": "ansi_colors.py",
"copies": "1",
"size": "77701",
"license": "bsd-2-clause",
"hash": -1005166823304271000,
"line_mean": 35.5136278195,
"line_max": 52,
"alpha_frac": 0.5694778703,
"autogenerated": false,
"ratio": 2.1441264935566653,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3213604363856666,
"avg_score": null,
"num_lines": null
} |
# Algorithm-2017-09-11.py
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
from pprint import pprint
import math
from utils import import_net, export_net
from time import time
# In[ ]:
def sort_nodes(t):
    """Sort key for (node, [out_degree, in_degree]) items.

    Orders descending by the first counter, then descending by the second.
    """
    degree_pair = t[1]
    return (-degree_pair[0], -degree_pair[1])
def BFS(graph, start):
    """Breadth-first traversal of ``graph`` from ``start``.

    For every node touched, two counters are kept: the first is the node's
    out-degree (number of outgoing edges), the second counts how many times
    the node appeared as the target of an edge during the traversal
    (its in-degree within the explored forest).

    NOTE(review): carried over from the original author's question — the
    second counter tallies *all* incoming edges seen, including back-edges
    to the parent; confirm this matches the intended metric.

    :param graph: adjacency mapping {node: [successor, ...]}; ``start`` is
        assumed to be a key of ``graph`` (a KeyError is raised otherwise,
        as in the original).
    :param start: node to start the traversal from
    :return: tuple of (items sorted by descending counters, set of visited
        nodes)
    """
    # Bug/perf fix: the original dequeued with list.pop(0), which is O(n)
    # per pop and O(n^2) for the whole traversal.  An index pointer into
    # the same list gives O(1) dequeues with identical visit order.
    queue = [start]
    head = 0
    output = dict()
    visited = set()
    while head < len(queue):
        node = queue[head]
        head += 1
        visited.add(node)
        if node in output:
            output[node][0] = len(graph[node])
        else:
            output[node] = [len(graph[node]), 0]
        for adjacent in graph[node]:
            # count the incoming edge, creating the entry on first sight
            if adjacent in output:
                output[adjacent][1] += 1
            else:
                output[adjacent] = [0, 1]
            if adjacent not in visited:
                visited.add(adjacent)
                # only nodes with an adjacency list can be expanded further
                if adjacent in graph:
                    queue.append(adjacent)
    # sort descending by out-degree, then by in-degree (key inlined so the
    # function is self-contained; equivalent to the module's sort_nodes)
    return sorted(output.items(), key=lambda item: (-item[1][0], -item[1][1])), visited
# In[ ]:
def sort_forests(f):
    """Sort key that orders forests from largest to smallest."""
    return 0 - len(f)
def alg(network, all_nodes):
    """Run BFS over every unvisited node and score the whole network.

    Each node's contribution is (counter1 + counter2) scaled by a weight
    that starts at the forest size and decreases by one per node in the
    forest's sorted order, normalized by the total node count.

    :return: list of (node, score) pairs, highest score first
    """
    total = len(all_nodes)
    scores = {}
    seen = set()
    for root in all_nodes:
        if root in seen:
            continue
        forest, reached = BFS(network, root)
        seen.update(reached)
        # weight decays with the node's rank inside its forest
        weight = len(forest)
        for node_id, (first_counter, second_counter) in forest:
            contribution = (first_counter + second_counter) * weight / total
            scores[node_id] = scores.get(node_id, 0) + contribution
            weight -= 1
    return sorted(scores.items(), key=lambda item: -item[1])
# In[ ]:
### ALTERNATIVE: RUN THIS TO TEST
# Small hand-built directed graph for sanity-checking alg() by hand;
# note 'J' appears only as an edge target, never as a key.
network = { 'A': ['B', 'C'],
            'B': ['A', 'C'],
            'C': ['B', 'D', 'E' ,'F'],
            'D': ['E'],
            'E': ['C'],
            'F': ['B', 'D'],
            'G': ['H', 'I','C'],
            'H': ['G'],
            'I': ['G', 'J']}
all_nodes = ['A', 'B', 'C', 'D', 'E',
             'F', 'G', 'H', 'I', 'J']
# results = alg(network, all_nodes)
# pprint(results)
## ON REAL DATA
# Score every "real"/"model" network (files loaded via the project-level
# utils.import_net) and append all result sets to one CSV; the header row
# is written only for the very first network.
first = True
for ftype in ['real','model']:
# for ftype in ['real']:
    for i in range(1,5):
        t0 = time()
        network_name = ftype+str(i)
        network, all_nodes = import_net(network_name)
        results = alg(network, all_nodes)
        print('Nodes in results: ' + str(len(results)))
        # integer minutes only — sub-minute runs print 0
        print("Run-time (in minutes): " + str(int((time()-t0)/60)))
        if first:
            export_net(results, network_name, 'results-full-v4.csv', first=True)
            first = False
        else:
            export_net(results, network_name, 'results-full-v4.csv', first=False)
| {
"repo_name": "lucasosouza/graph-competition",
"path": "Algorithm-2017-09-11.py",
"copies": "1",
"size": "3441",
"license": "mit",
"hash": 261155934437804740,
"line_mean": 25.2595419847,
"line_max": 89,
"alpha_frac": 0.499127907,
"autogenerated": false,
"ratio": 3.675213675213675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46743415822136747,
"avg_score": null,
"num_lines": null
} |
""" Algorithm evaluators for Rigor """
from __future__ import print_function
from collections import defaultdict
import sys
class ObjectAreaEvaluator(object):
    """
    Compares ground truth to detections using Wolf and Jolion's algorithm.

    :param scatter_punishment: :math:`f_{sc}(k)` "a parameter function of the evaluation scheme which controls the amount of punishment which is inflicted in case of scattering, i.e. splits or merges"
    :type scatter_punishment: lambda(x): -> float
    :param float precision_threshold: :math:`t_{p}` in [1]_
    :param float recall_threshold: :math:`t_{r}` in [1]_

    .. seealso::
        Object count/Area Graphs for the Evaluation of Object Detection and Segmentation Algorithms [1]_
        .. [1] http://liris.cnrs.fr/Documents/Liris-2216.pdf
    """
    # Class-level imports (kept as originally written) so the classmethods
    # can refer to them via cls.np / cls.Polygon.
    import numpy as np
    from shapely.geometry import Polygon

    def __init__(self, scatter_punishment=lambda k: 1.0, precision_threshold=0.4, recall_threshold=0.8):
        # Bug fix: the original default was written `lambda(k): 1.0`, which is
        # Python-2-only tuple-parameter syntax and a SyntaxError on Python 3;
        # `lambda k: 1.0` is equivalent on both.
        self.scatter_punishment = scatter_punishment
        self.precision_threshold = precision_threshold
        self.recall_threshold = recall_threshold

    @staticmethod
    def non_zero_polygon(polygon, suppress_warning=False):
        """
        Checks that a polygon has a nonzero area. If the area is zero, it will be
        dilated by a small amount so that overlap and such can be measured.

        :param polygon: The polygon to test
        :type polygon: :py:class:`~shapely.Polygon`
        :param bool suppress_warning: If :py:const:`False`, a warning will be printed if the area is dilated; if :py:const:`True`, no warning will be printed.
        :return: the original :py:class:`~shapely.Polygon`, possibly dilated
        """
        if polygon.area > 0:
            return polygon
        if not suppress_warning:
            print("Warning: polygon has zero area; dilating", file=sys.stderr)
        # small buffer + convex hull gives a measurable, well-formed polygon
        return polygon.buffer(0.05, 1).convex_hull

    @classmethod
    def prune_and_polygon(cls, ground_truths, detections):
        """
        Given either :py:class:`~shapely.Polygon` instances or plain-Python sequences of
        vertices, returns a tuple of ground truth and detection :py:class:`~shapely.Polygon`
        instances, excluding ground truth polygons that have zero length
        """
        # duck-typing: anything without .intersection is treated as raw vertices
        if not hasattr(ground_truths[0], 'intersection'):
            ground_truths = [cls.Polygon(value) for value in ground_truths]
        if not hasattr(detections[0], 'intersection'):
            detections = [cls.Polygon(value) for value in detections]
        ground_truths = [value for value in ground_truths if value.length > 0.]
        return (ground_truths, detections)

    @classmethod
    def build_matrices(cls, ground_truths, detections):
        """
        Builds a set of matrices containing measurements of overlap between ground
        truth and detections.

        :param ground_truths: Sequence of ground truth polygons
        :param detections: Sequence of detected polygons
        :return: Tuple of :py:class:`numpy.array` arrays: (precision matrix, recall matrix),
            each indexed [gt_index, det_index].  (The original docstring's
            "(ground truth matches, detection matches)" did not match the code.)
        """
        ground_truth_count = len(ground_truths)
        detection_count = len(detections)
        recall_matrix = cls.np.empty((ground_truth_count, detection_count), dtype=float)
        precision_matrix = cls.np.empty((ground_truth_count, detection_count), dtype=float)
        for gt_index in range(ground_truth_count):
            ground_truth = ObjectAreaEvaluator.non_zero_polygon(ground_truths[gt_index])
            for det_index in range(detection_count):
                detection = ObjectAreaEvaluator.non_zero_polygon(detections[det_index])
                overlap_polygon = ground_truth.intersection(detection)
                # precision: fraction of the detection covered by this GT
                precision_area = overlap_polygon.area / detection.area
                precision_matrix[gt_index, det_index] = precision_area
                # recall: fraction of the GT covered by this detection
                recall_area = overlap_polygon.area / ground_truth.area
                recall_matrix[gt_index, det_index] = recall_area
        return (precision_matrix, recall_matrix)

    def match_detections(self, ground_truths, detections):
        """
        Compares ground_truths to detections, returning per-polygon match lists:
        (match_ground_truth, match_detection), where entry *i* of each list
        holds the indices matched to polygon *i* on the other side.

        NOTE(review): when either input is empty, a 4-tuple of zeroed scores is
        returned instead of the two lists (kept as-is for compatibility, but
        the return shape differs — callers should be audited).
        """
        if not ground_truths or not detections:
            return (0., 0., (0., len(detections)), (0., len(ground_truths)))
        ground_truths, detections = ObjectAreaEvaluator.prune_and_polygon(ground_truths, detections)
        ground_truth_count = len(ground_truths)
        detection_count = len(detections)
        if ground_truth_count == 0 or detection_count == 0:
            return (0., 0., (0., float(detection_count)), (0., float(ground_truth_count)))
        precision_matrix, recall_matrix = self.build_matrices(ground_truths, detections)
        ground_truth_count = precision_matrix.shape[0]
        detection_count = precision_matrix.shape[1]
        ground_truth_sets_precision = defaultdict(set) # number of ground truth items that match a particular detection in the precision matrix
        detection_sets_precision = defaultdict(set) # number of detection items that match a particular ground truth in the precision matrix
        ground_truth_sets_recall = defaultdict(set) # number of ground truth items that match a particular detection in the recall matrix
        detection_sets_recall = defaultdict(set) # number of detection items that match a particular ground truth in the recall matrix
        for gt_index in range(ground_truth_count):
            for det_index in range(detection_count):
                if precision_matrix[gt_index, det_index] >= self.precision_threshold:
                    ground_truth_sets_precision[det_index].add(gt_index)
                    detection_sets_precision[gt_index].add(det_index)
                if recall_matrix[gt_index, det_index] >= self.recall_threshold:
                    ground_truth_sets_recall[det_index].add(gt_index)
                    detection_sets_recall[gt_index].add(det_index)
        match_ground_truth = [list() for _ in range(ground_truth_count)]
        match_detection = [list() for _ in range(detection_count)]
        for gt_index in detection_sets_precision:
            matching_detections_precision = detection_sets_precision[gt_index]
            if len(matching_detections_precision) == 1:
                (detection_precision, ) = matching_detections_precision
                if len(ground_truth_sets_precision[detection_precision]) == 1:
                    # one-to-one in the precision matrix.
                    # NOTE(review): only the ground-truth side is recorded here,
                    # unlike the symmetric bookkeeping in evaluate_matrices —
                    # preserved as-is; confirm whether the detection side
                    # should also be appended.
                    match_ground_truth[gt_index].append(detection_precision)
            else:
                # one-to-many (one ground truth to many detections)
                gt_sum = 0.
                for detection_precision in matching_detections_precision:
                    gt_sum += recall_matrix[gt_index, detection_precision]
                if gt_sum >= self.recall_threshold:
                    for detection_precision in matching_detections_precision:
                        match_ground_truth[gt_index].append(detection_precision)
                        match_detection[detection_precision].append(gt_index)
        for det_index in ground_truth_sets_recall:
            matching_ground_truths_recall = ground_truth_sets_recall[det_index]
            if len(matching_ground_truths_recall) == 1:
                (ground_truth_recall, ) = matching_ground_truths_recall
                if len(detection_sets_recall[ground_truth_recall]) == 1:
                    match_detection[det_index].append(ground_truth_recall)
            else:
                # many-to-one (many ground truths covered by one detection)
                det_sum = 0
                for ground_truth_recall in matching_ground_truths_recall:
                    det_sum += precision_matrix[ground_truth_recall, det_index]
                if det_sum >= self.precision_threshold:
                    # Bug fix: the original loop re-accumulated det_sum here
                    # (a duplicated line with no effect on the result);
                    # the stray accumulation has been removed.
                    for ground_truth_recall in matching_ground_truths_recall:
                        match_detection[det_index].append(ground_truth_recall)
                        match_ground_truth[ground_truth_recall].append(det_index)
        return match_ground_truth, match_detection

    def evaluate(self, ground_truths, detections):
        r"""
        Given lists of polylines for each parameter (ground_truths, detections),
        this will check the overlap and return a (precision, recall, (:math:`\sum Match_D`,
        :math:`|D|`), (:math:`\sum Match_G`, :math:`|G|`)) tuple for the overall image.

        ground_truths and detections should both be sequences of (x,y) point tuples.

        Example result: ``(0.5714285714285714, 0.8, (4.0, 7.0), (4.0, 5.0))``
        """
        # Bug fix: the example tuple above previously sat here as a stray
        # no-op expression; it has been folded into the docstring.
        if not ground_truths or not detections:
            return (0.,0.,(0.,len(detections)), (0., len(ground_truths)))
        ground_truths, detections = ObjectAreaEvaluator.prune_and_polygon(ground_truths, detections)
        ground_truth_count = len(ground_truths)
        detection_count = len(detections)
        if ground_truth_count == 0 or detection_count == 0:
            return (0., 0., (0., float(detection_count)), (0., float(ground_truth_count)))
        precision_matrix, recall_matrix = self.build_matrices(ground_truths, detections)
        return self.evaluate_matrices(precision_matrix, recall_matrix)

    def evaluate_matrices(self, precision_matrix, recall_matrix):
        r"""
        Given a precision and recall matrix (2d matrix; rows are ground truth,
        columns are detections) containing overlap between each pair of ground
        truth and detection polygons, this will run the match functions over the
        matrix and return a (precision, recall, (:math:`\sum Match_D`, :math:`|D|`), (:math:`\sum Match_G`,
        :math:`|G|`)) tuple for the overall image.

        (Docstring made raw — the ``\s`` sequences were invalid escapes.)
        """
        ground_truth_count = precision_matrix.shape[0]
        detection_count = precision_matrix.shape[1]
        ground_truth_sets_precision = defaultdict(set) # number of ground truth items that match a particular detection in the precision matrix
        detection_sets_precision = defaultdict(set) # number of detection items that match a particular ground truth in the precision matrix
        ground_truth_sets_recall = defaultdict(set) # number of ground truth items that match a particular detection in the recall matrix
        detection_sets_recall = defaultdict(set) # number of detection items that match a particular ground truth in the recall matrix
        for gt_index in range(ground_truth_count):
            for det_index in range(detection_count):
                if precision_matrix[gt_index, det_index] >= self.precision_threshold:
                    ground_truth_sets_precision[det_index].add(gt_index)
                    detection_sets_precision[gt_index].add(det_index)
                if recall_matrix[gt_index, det_index] >= self.recall_threshold:
                    ground_truth_sets_recall[det_index].add(gt_index)
                    detection_sets_recall[gt_index].add(det_index)
        match_ground_truth = 0. # sum of MatchG
        match_detection = 0. # sum of MatchD
        one_to_one_precision = set()
        for gt_index in detection_sets_precision:
            matching_detections_precision = detection_sets_precision[gt_index]
            if len(matching_detections_precision) == 1:
                (detection_precision, ) = matching_detections_precision
                if len(ground_truth_sets_precision[detection_precision]) == 1:
                    one_to_one_precision.add((gt_index, detection_precision))
            else:
                # one-to-many (one ground truth to many detections)
                gt_sum = 0.
                for detection_precision in matching_detections_precision:
                    gt_sum += recall_matrix[gt_index, detection_precision]
                if gt_sum >= self.recall_threshold:
                    #print("1:N ~ GT {} : DT {}".format(gt_index,matching_detections_precision))
                    match_ground_truth += self.scatter_punishment(matching_detections_precision)
                    match_detection += len(matching_detections_precision) * self.scatter_punishment(matching_detections_precision)
        one_to_one_recall = set()
        for det_index in ground_truth_sets_recall:
            matching_ground_truths_recall = ground_truth_sets_recall[det_index]
            if len(matching_ground_truths_recall) == 1:
                (ground_truth_recall, ) = matching_ground_truths_recall
                if len(detection_sets_recall[ground_truth_recall]) == 1:
                    one_to_one_recall.add((ground_truth_recall, det_index))
            else:
                # many-to-one (many ground truths covered by one detection)
                det_sum = 0
                for ground_truth_recall in matching_ground_truths_recall:
                    det_sum += precision_matrix[ground_truth_recall, det_index]
                if det_sum >= self.precision_threshold:
                    #print("N:1 ~ DT {} : GT {}".format(det_index,matching_ground_truths_recall))
                    match_detection += self.scatter_punishment(matching_ground_truths_recall)
                    match_ground_truth += len(matching_ground_truths_recall) * self.scatter_punishment(matching_ground_truths_recall)
        # a pair counts as one-to-one only if both thresholds agree on it
        one_to_one_matches = one_to_one_precision & one_to_one_recall
        match_ground_truth += len(one_to_one_matches)
        match_detection += len(one_to_one_matches)
        recall = match_ground_truth / float(ground_truth_count)
        precision = match_detection / float(detection_count)
        return (precision, recall, (match_detection, float(detection_count)), (match_ground_truth, float(ground_truth_count)))
| {
"repo_name": "blindsightcorp/rigor",
"path": "lib/evaluator.py",
"copies": "1",
"size": "11995",
"license": "bsd-2-clause",
"hash": 2494505943603799600,
"line_mean": 48.1598360656,
"line_max": 197,
"alpha_frac": 0.7323051271,
"autogenerated": false,
"ratio": 3.319955715471907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9413196156588519,
"avg_score": 0.02781293719667742,
"num_lines": 244
} |
"""Algorithm for 2048 game."""
import random
# (row_step, col_step) per move direction; Gameplay.move uses the row
# component's truthiness to choose column-wise vs row-wise merging and the
# component's sign to decide the merge direction.
OFFSETS = {'UP': (1, 0),
           'DOWN': (-1, 0),
           'LEFT': (0, 1),
           'RIGHT': (0, -1)}
def openFile(score):
    """Load the high-score list from ``highscore.txt`` in the working dir.

    Returns a list whose first element is always 0 followed by one int per
    non-blank line of the file.  If the file does not exist, it is created
    empty and ``[0]`` is returned.

    Fixes over the original: the Python-2-only ``file()`` builtin is replaced
    with ``open()``; file handles are closed via context managers instead of
    leaking; blank lines no longer crash ``int()``; and the computed list is
    now returned (the original built it and threw it away).

    :param score: currently unused; kept for interface compatibility.
    """
    scoreList = [0]
    try:
        with open("highscore.txt", "r+") as scoreFile:
            for line in scoreFile:
                line = line.strip()
                if line:
                    scoreList.append(int(line))
    except IOError:
        # no score file yet: create an empty one, as the original did
        with open("highscore.txt", "w"):
            pass
        scoreList = [0]
    return scoreList
def merge(line, reverse=False):
    """Collapse one 2048 row/column toward its head.

    A stable sort on truthiness slides the non-zero tiles to the front while
    preserving their order; a single left-to-right pass then merges each
    adjacent equal pair once, and a second slide closes the gaps.  With
    ``reverse=True`` the line is merged toward its tail instead.
    """
    cells = list(line)
    if reverse:
        cells = cells[::-1]
    cells.sort(key=bool, reverse=True)
    for idx in range(len(cells) - 1):
        if cells[idx] == cells[idx + 1]:
            cells[idx] = cells[idx] * 2
            cells[idx + 1] = 0
    cells.sort(key=bool, reverse=True)
    return cells[::-1] if reverse else cells
class Gameplay:
    """Headless 2048 game state: a tile grid plus move/merge/spawn logic."""

    def __init__(self, grid_height=4, grid_width=4):
        self._grid_height = grid_height
        self._grid_width = grid_width
        self.state = True  # False once the board fills up (game over)
        self.reset()

    def reset(self):
        """Clear the board and spawn the two starting tiles."""
        self._grid = [[0 for num in range(self._grid_width)]
                      for num in range(self._grid_height)]
        self.new_tile()
        self.new_tile()

    def __str__(self):
        """One grid row per line, for debugging."""
        debug_str = ""
        for row in self._grid:
            debug_str += str(row) + "\n"
        return debug_str

    def get_grid_height(self):
        return self._grid_height

    def get_grid_width(self):
        return self._grid_width

    def get_state(self):
        """Return False once the game is over.

        Bug fix: this method was defined twice in the original class body
        (the second identical definition silently shadowed the first);
        the duplicate has been removed.
        """
        return self.state

    def get_score(self):
        """Sum of all tile values.

        NOTE(review): classic 2048 scores merges rather than the board sum;
        kept as-is to preserve the original behavior.
        """
        score = 0
        for row in self._grid:
            for num in row:
                score += num
        return score

    def move(self, direction):
        """Apply a move ('UP'/'DOWN'/'LEFT'/'RIGHT').

        Returns True (and spawns a new tile) when the move changed the
        board; otherwise re-checks the game state and returns False.
        """
        new_grid = [[0 for num in range(self._grid_width)]
                    for num in range(self._grid_height)]
        temp_list = list()
        direction = str(direction)
        if OFFSETS[direction][0]:
            # vertical move: merge each column; a negative row offset
            # merges toward the bottom (reversed order)
            for num in range(self._grid_width):
                temp_list = list()
                for row in range(self._grid_height):
                    temp_list.append(self._grid[row][num])
                temp_list = merge(temp_list, reverse=(OFFSETS[direction][0] < 0))
                for row in range(self._grid_height):
                    new_grid[row][num] = temp_list[row]
        else:
            # horizontal move: merge each row directly
            for row in range(self._grid_height):
                new_grid[row] = merge(self._grid[row], reverse=(OFFSETS[direction][1] < 0))
        if not self._grid == new_grid:
            self._grid = new_grid
            self.new_tile()
            return True
        else:
            self.check_state()
            return False

    def new_tile(self):
        """Place a 2 (90%) or 4 (10%) on a random empty cell.

        If the board is full, flags the game as over instead.
        """
        blank_list = list()
        for row in range(self._grid_height):
            for num in range(self._grid_width):
                if self._grid[row][num] == 0:
                    blank_list.append((row, num))
        if blank_list:
            blank_pos = random.choice(blank_list)
            if random.random() <= 0.9:
                self._grid[blank_pos[0]][blank_pos[1]] = 2
            else:
                self._grid[blank_pos[0]][blank_pos[1]] = 4
        else:
            self.state = False

    def check_state(self):
        """Return True if any cell is empty; otherwise flag game over.

        NOTE(review): a full board with adjacent equal tiles is still
        playable in 2048 — preserved as-is from the original.
        """
        for row in self._grid:
            for num in row:
                if num == 0:
                    return True
        self.state = False

    def set_tile(self, row, col, value):
        self._grid[row][col] = value

    def get_tile(self, row, col):
        return self._grid[row][col]

    def set_grid(self, grid):
        # stores the caller's list by reference, as originally written
        self._grid = grid

    def clone(self):
        """Return an independent copy of this game.

        Bug fix: the original handed its own grid to the clone by
        reference, so any mutation of the clone (e.g. during Monte-Carlo
        rollouts) corrupted the source game; a row-wise copy is passed now.
        """
        new_clone = Gameplay(self._grid_height, self._grid_width)
        new_clone.set_grid([row[:] for row in self._grid])
        return new_clone
# poc_2048_gui.run_gui(TwentyFortyEight(5, 4))
| {
"repo_name": "WillSkywalker/2048_monte_carlo",
"path": "_2048.py",
"copies": "1",
"size": "4116",
"license": "apache-2.0",
"hash": -5386982993453591000,
"line_mean": 27.3862068966,
"line_max": 122,
"alpha_frac": 0.5160349854,
"autogenerated": false,
"ratio": 3.6619217081850532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4677956693585053,
"avg_score": null,
"num_lines": null
} |
"""Algorithm for basic classtering."""
import numpy as np
from mutil import p_info
from bunch import Bunch
# Names of the clustering algorithms this module exposes.
L_algorithm = ['alg1']
# Default parameter bundle (Bunch comes from the `bunch` module imported above).
Default_param = Bunch(name='lr')
def example():
    """Demo run: build a random co-occurrence matrix and group its labels."""
    matrix, frequencies = generate_comat()
    group_comat(matrix, frequencies, n_sample=100)
def group_comat(comat, hist, n_sample, th_pmi=0.0, th_js=0.0):
    """Cluster labels by pairwise PMI and Jensen-Shannon divergence.

    :param comat: square label co-occurrence count matrix (n_label x n_label)
    :param hist: per-label frequency counts, indexable by label id
    :param n_sample: total number of samples the counts were drawn from
    :param th_pmi: labels merge when their PMI exceeds this threshold
    :param th_js: labels merge when their JS divergence falls below this
    :return: numpy array of group ids, one per label
    """
    n_label = comat.shape[0]
    mat_pmi = np.zeros((n_label, n_label))
    mat_js = np.zeros((n_label, n_label))
    p_info('Compute Distance Matrix')  # project-level logger from mutil
    for i in range(n_label):
        for j in range(n_label):
            if j < i:
                # both measures are symmetric, so only the lower triangle
                # is computed and mirrored
                co_pmi = pmi(comat[i, j], hist[i], hist[j], n_sample)
                mat_pmi[i][j] = mat_pmi[j][i] = co_pmi
                co_js = js_div(distribution(comat[i]), distribution(comat[j]))
                mat_js[i][j] = mat_js[j][i] = co_js
    p_info('Compute Group')
    l_group = np.arange(n_label)
    js_counter = 0
    pmi_counter = 0
    for i in range(1, n_label):
        for j in range(i):
            # NOTE(review): order-dependent — l_group[j] may already have been
            # rewritten by an earlier iteration, so grouping chains only in
            # index order; confirm this transitive behavior is intended.
            if mat_js[i][j] < th_js:
                l_group[i] = l_group[j]
                js_counter += 1
            elif mat_pmi[i][j] > th_pmi:
                l_group[i] = l_group[j]
                pmi_counter += 1
    print (js_counter, pmi_counter)
    return l_group
def pmi(freq_ij, freq_i, freq_j, n_all):
    """Pointwise mutual information (base 2) of two events.

    Computed as log2( P(i,j) / (P(i) * P(j)) ) from raw counts.
    """
    joint_scaled = float(n_all * freq_ij)
    independent = freq_i * freq_j
    return np.log2(joint_scaled / independent)
def js_div(p_1, p_2):
    """Jensen-Shannon divergence (base 2) between two distributions."""
    def _kl(p, q):
        # wherever q is zero, substitute p so the ratio is defined
        # (in-place patch of q, matching the original implementation)
        zero_mask = np.where(q == 0.)
        q[zero_mask] = p[zero_mask]
        terms = p * np.log2(p / q)
        # 0 * log(0/q) produces NaN; those terms contribute nothing
        terms[np.where(np.isnan(terms))] = 0.
        return sum(terms)
    midpoint = 0.5 * (p_1 + p_2)
    return 0.5 * (_kl(p_1, midpoint) + _kl(p_2, midpoint))
def euc_div(p_1, p_2):
    """Euclidean distance between two vectors."""
    difference = p_1 - p_2
    return np.linalg.norm(difference)
def distribution(histogram):
    """Normalize a histogram of counts into a probability distribution."""
    counts = histogram.astype('float')
    return counts / counts.sum()
def generate_comat():
    """Build a random symmetric 10x10 co-occurrence matrix for demos.

    Each label gets a frequency drawn from [10, 30); each off-diagonal
    pair co-occurs a random number of times bounded by the smaller of the
    two frequencies, mirrored across the diagonal.  The diagonal is zero.
    """
    size = 10
    low_freq, high_freq = 10, 30
    matrix = np.zeros((size, size)).astype('int')
    # NOTE: the RNG draw order below matches the original exactly
    frequencies = [np.random.randint(low_freq, high_freq) for _ in range(size)]
    for row in range(size):
        for col in range(size):
            if col < row:
                cap = min(frequencies[row], frequencies[col])
                draw = np.random.randint(cap)
                matrix[row][col] = draw
                matrix[col][row] = draw
            elif col == row:
                matrix[row][row] = 0
    return matrix, frequencies
| {
"repo_name": "belemizz/mimic2_tools",
"path": "clinical_db/alg/clustering.py",
"copies": "1",
"size": "2348",
"license": "mit",
"hash": 5551519073343978000,
"line_mean": 25.9885057471,
"line_max": 78,
"alpha_frac": 0.5195911414,
"autogenerated": false,
"ratio": 2.705069124423963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3724660265823963,
"avg_score": null,
"num_lines": null
} |
# Algorithm for determining chord symbols based on frequency spectrum
from __future__ import division
import math
# Spectrum-analysis parameters used by calculateChromagram().
samplingFrequency = 2000
bufferSize = 1024
referenceFrequency = 130.81278265 # C
numHarmonics = 2
numOctaves = 4
numBinsToSearch = 2
# Equal-tempered frequency of each of the 12 pitch classes,
# filled in by the startup loop below.
noteFrequencies = []
# 12-bin pitch-class energy vector, overwritten in place by calculateChromagram().
chromagram = [0.0000000000000000000]*12
noteNames = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
# NOTE(review): `qualities` is never read in the visible code — confirm use.
qualities = ["min", "maj", "sus", "", "-", "+"]
print "Reference Frequencies (hz):"
for i in range(0, 12):
freq = referenceFrequency*math.pow(2, i/12)
print noteNames[i] + ": " + str(freq)
noteFrequencies.append(freq)
# take a frequency vector and then the audio values for each of those frequencies
def calculateChromagram(freq, m):
    """Fill the module-level ``chromagram`` with per-pitch-class energy.

    :param freq: sequence of bin frequencies describing the spectrum
    :param m: magnitude value for each of those bins

    For each of the 12 notes, energy is collected from the spectrum bins
    surrounding the note's harmonics across several octaves; the
    module-level ``chromagram`` list is mutated in place and returned.
    NOTE(review): the octave/harmonic loops run from 1 up to but *not
    including* numOctaves/numHarmonics — possibly off by one; preserved.
    """
    divisorRatio = (samplingFrequency / 4.0) / bufferSize
    for note in range(12):
        chromaSum = 0
        for octave in range(1, numOctaves):
            noteSum = 0
            for harmonic in range(1, numHarmonics):
                centerBin = round((noteFrequencies[note] * octave * harmonic) / divisorRatio)
                lowBin = centerBin - (numBinsToSearch * harmonic)
                highBin = centerBin + (numBinsToSearch * harmonic)
                # nearest spectrum indices to the low/high bin positions
                lowIndex = min(range(len(freq)), key=lambda i: abs(freq[i] - lowBin))
                highIndex = min(range(len(freq)), key=lambda i: abs(freq[i] - highBin))
                peak = 0
                for k in range(int(lowIndex), int(highIndex)):
                    if m[k] > peak:
                        peak = m[k]
                noteSum += peak / harmonic
            chromaSum += noteSum
        chromagram[note] = chromaSum
    return chromagram
class ChordDetector:
    """Template-matching chord recognizer over 12-bin chromagrams.

    NOTE(review): everything below is a *class* attribute shared by all
    instances; the detection state (rootNote, quality, chromagram, chord)
    is mutated on the class itself rather than per instance — confirm this
    single-detector design is intentional.
    """
    ChordQuality = ["Minor", "Major", "Suspended", "Dominant", "Diminished5th", "Augmented5th"]
    # weighting applied when scoring triads vs 7th chords (see classifyChromagram)
    bias = 1.06
    rootNote = 0
    quality = ""
    intervals = 0
    chromagram = [0]*12
    chordProfiles = []
    # one matching score per candidate chord: 9 chord families x 12 roots
    chord = [0]*108
    # build the 108 x 12 zero matrix that makechordProfiles() later fills in
    for j in range(0, 108):
        tmp = [];
        for t in range(0, 12):
            tmp.append(0)
        chordProfiles.append(tmp)
    def __init__(self):
        """Populate the shared chord-profile templates on construction."""
        self.makechordProfiles()
def makechordProfiles(self):
i = int()
t = int()
j = 0
root = int()
third = int()
fifth = int()
seventh = int()
v1 = 1
v2 = 1;
v3 = 1;
j = 0;
# major chords
for i in range(0, 12):
root = i % 12;
third = (i+4) % 12;
fifth = (i+7) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
j+=1;
# minor chords
for i in range(0, 12):
root = i % 12;
third = (i+3) % 12;
fifth = (i+7) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
j+=1;
# diminished chords
for i in range(0, 12):
root = i % 12;
third = (i+3) % 12;
fifth = (i+6) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
j+=1;
# augmented chords
for i in range(0, 12):
root = i % 12;
third = (i+4) % 12;
fifth = (i+8) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
j+=1;
# sus2 chords
for i in range(0, 12):
root = i % 12;
third = (i+2) % 12;
fifth = (i+7) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
j+=1;
# sus4 chords
for i in range(0, 12):
root = i % 12;
third = (i+5) % 12;
fifth = (i+7) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
j+=1;
# major 7th chords
for i in range(0, 12):
root = i % 12;
third = (i+4) % 12;
fifth = (i+7) % 12;
seventh = (i+11) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
ChordDetector.chordProfiles[j][seventh] = v3;
j+=1;
# minor 7th chords
for i in range(0, 12):
root = i % 12;
third = (i+3) % 12;
fifth = (i+7) % 12;
seventh = (i+10) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
ChordDetector.chordProfiles[j][seventh] = v3;
j+=1;
# dominant 7th chords
for i in range(0, 12):
root = i % 12;
third = (i+4) % 12;
fifth = (i+7) % 12;
seventh = (i+10) % 12;
ChordDetector.chordProfiles[j][root] = v1;
ChordDetector.chordProfiles[j][third] = v2;
ChordDetector.chordProfiles[j][fifth] = v3;
ChordDetector.chordProfiles[j][seventh] = v3;
j+=1;
# print ChordDetector.chordProfiles
#=======================================================================
def detectChord(self, chroma):
for i in range(0, 12):
ChordDetector.chromagram[i] = chroma[i];
# print chromagram
self.classifyChromagram();
#=======================================================================
def classifyChromagram(self):
    """Score the shared chromagram against all 108 chord profiles and store
    the winner in ChordDetector.rootNote / .quality / .intervals.

    Profiles are laid out in blocks of 12 (one per root note):
    0-11 major, 12-23 minor, 24-35 dim5, 36-47 aug5, 48-59 sus2,
    60-71 sus4, 72-83 maj7, 84-95 min7, 96-107 dom7.
    """
    i = int()
    j = int()
    fifth = int()
    chordindex = int(); #print ChordDetector.chromagram
    # remove some of the 5th note energy from ChordDetector.chromagram
    for i in range(0, 12):
        fifth = (i+7) % 12;
        ChordDetector.chromagram[fifth] = ChordDetector.chromagram[fifth] - (0.1*ChordDetector.chromagram[i]);
        if (ChordDetector.chromagram[fifth] < 0):
            ChordDetector.chromagram[fifth] = 0;
    # major chords
    for j in range(0, 12):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],ChordDetector.bias,3);
    # minor chords
    for j in range(12, 24):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],ChordDetector.bias,3);
    # diminished 5th chords
    for j in range(24, 36):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],ChordDetector.bias,3);
    # augmented 5th chords
    for j in range(36, 48):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],ChordDetector.bias,3);
    # sus2 chords
    # NOTE(review): sus2/sus4/maj7 use a flat bias of 1 instead of
    # ChordDetector.bias — looks deliberate (penalises suspended chords
    # relative to triads), but confirm against the reference detector.
    for j in range(48, 60):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],1,3);
    # sus4 chords
    for j in range(60, 72):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],1,3);
    # major 7th chords
    for j in range(72, 84):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],1,4);
    # minor 7th chords
    for j in range(84, 96):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],ChordDetector.bias,4);
    # dominant 7th chords
    for j in range(96, 108):
        ChordDetector.chord[j] = self.calculateChordScore(ChordDetector.chromagram,ChordDetector.chordProfiles[j],ChordDetector.bias,4);
    # Lowest score wins: profiles penalise energy outside the chord tones.
    chordindex = self.minimumIndex(ChordDetector.chord,108);
    # major
    if (chordindex < 12):
        ChordDetector.rootNote = chordindex;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Major');
        ChordDetector.intervals = 0;
    # minor
    if ((chordindex >= 12) and (chordindex < 24)):
        ChordDetector.rootNote = chordindex-12;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Minor');
        ChordDetector.intervals = 0;
    # diminished 5th
    if ((chordindex >= 24) and (chordindex < 36)):
        ChordDetector.rootNote = chordindex-24;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Diminished5th');
        ChordDetector.intervals = 0;
    # augmented 5th
    if ((chordindex >= 36) and (chordindex < 48)):
        ChordDetector.rootNote = chordindex-36;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Augmented5th');
        ChordDetector.intervals = 0;
    # sus2
    if ((chordindex >= 48) and (chordindex < 60)):
        ChordDetector.rootNote = chordindex-48;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Suspended');
        ChordDetector.intervals = 2;
    # sus4
    if ((chordindex >= 60) and (chordindex < 72)):
        ChordDetector.rootNote = chordindex-60;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Suspended');
        ChordDetector.intervals = 4;
    # major 7th
    if ((chordindex >= 72) and (chordindex < 84)):
        ChordDetector.rootNote = chordindex-72;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Major');
        ChordDetector.intervals = 7;
    # minor 7th
    if ((chordindex >= 84) and (chordindex < 96)):
        ChordDetector.rootNote = chordindex-84;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Minor');
        ChordDetector.intervals = 7;
    # dominant 7th
    if ((chordindex >= 96) and (chordindex < 108)):
        ChordDetector.rootNote = chordindex-96;
        ChordDetector.quality = ChordDetector.ChordQuality.index('Dominant');
        ChordDetector.intervals = 7;
#=======================================================================
def calculateChordScore(self, chroma, chordProfile, biasToUse, N):
    """Score how poorly `chroma` matches `chordProfile` (lower is better).

    Accumulates chroma energy in bins the profile marks as non-chord tones,
    then normalises by chord size N and the supplied bias weight.
    """
    residual = 0
    for bin_idx in range(12):
        off_profile = 1 - chordProfile[bin_idx]
        residual += off_profile * (chroma[bin_idx] * chroma[bin_idx])
    return math.sqrt(residual) / ((12 - N) * biasToUse)
#=======================================================================
def minimumIndex(self, array, arrayLength):
    """Return the index of the smallest of the first `arrayLength` entries.

    Fix: the original seeded the search with the magic value 100000, so an
    array whose entries all exceed 100000 wrongly returned index 0.
    Returns 0 for a non-positive `arrayLength`, matching the old fallback.
    """
    if arrayLength <= 0:
        return 0
    # min() returns the first index attaining the minimum, which matches the
    # original strict `<` comparison on ties.
    return min(range(arrayLength), key=array.__getitem__)
| {
"repo_name": "gmittal/joey-alexander",
"path": "util/chords.py",
"copies": "1",
"size": "10739",
"license": "mit",
"hash": 7695201018163842000,
"line_mean": 30.2180232558,
"line_max": 134,
"alpha_frac": 0.5735170873,
"autogenerated": false,
"ratio": 3.0379066478076377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41114237351076377,
"avg_score": null,
"num_lines": null
} |
"""Algorithm for finding strongly connected components from a directed graph.
Component is strongly connected when there's a path from every vertex in to
every other vertex within the component.
Time complexity: O(V + E)
"""
from collections import defaultdict
from algolib.graph.dfs import DFS
def __process_vertex_early(_graph, dfs, vertex):
    """DFS early hook: push the newly discovered vertex onto the SCC stack."""
    dfs.stack += [vertex]
def __process_vertex_late(_graph, dfs, vertex):
    """DFS late hook: emit a strongly connected component when its root
    finishes, otherwise propagate the lowest reachable vertex to the parent.
    """
    obj = dfs[vertex]
    # `low` is the oldest vertex reachable from here; if it's the vertex
    # itself, everything pushed above it on the stack is one component.
    if obj.low == vertex:
        while True:
            top = dfs.stack.pop()
            dfs[top].component = dfs.index
            dfs.result[dfs.index].append(top)
            if top == vertex:
                break
        dfs.index += 1
    # Parent inherits this vertex's `low` when it reaches an earlier vertex
    # (earlier == smaller DFS entry time).
    if obj.parent and dfs[obj.low].entry < dfs[dfs[obj.parent].low].entry:
        dfs[obj.parent].low = obj.low
def __process_edge(_graph, dfs, source, dest, _edge):
    """DFS edge hook: update `low` along back edges and cross edges into
    vertices not yet assigned to a component. Returns True to continue.
    """
    category = dfs.edge_category(source, dest)
    src_info = dfs[source]
    dest_info = dfs[dest]
    links_component = (category == DFS.BACK or
                       (category == DFS.CROSS and dest_info.component is None))
    if links_component and dest_info.entry < dfs[src_info.low].entry:
        src_info.low = dest
    return True
def strong_components(graph):
    """Finds strongly connected components from given directed graph.

    Args:
        graph: Directed graph.

    Returns:
        List of components where each component is a list of vertices in
        that component. Components and vertices within a component are in
        no particular order.
    """
    dfs = DFS(graph,
              process_vertex_early=__process_vertex_early,
              process_vertex_late=__process_vertex_late,
              process_edge=__process_edge)
    dfs.index = 0                    # running component counter
    dfs.stack = []                   # vertices of the component in progress
    dfs.result = defaultdict(list)   # component index -> list of vertices
    for v in graph.vertices:
        info = dfs[v]
        info.low = v                 # earliest reachable vertex, initially itself
        info.component = None        # not yet assigned to a component
    for v in graph.vertices:
        if dfs[v].state == DFS.UNDISCOVERED:
            dfs.execute(v)
    return dfs.result.values()
| {
"repo_name": "niemmi/algolib",
"path": "algolib/graph/strong_components.py",
"copies": "1",
"size": "2131",
"license": "bsd-3-clause",
"hash": 2190603806152446000,
"line_mean": 26.3205128205,
"line_max": 77,
"alpha_frac": 0.6316283435,
"autogenerated": false,
"ratio": 3.895795246800731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
# Algorithm for maximum coverage problem
import numpy as np
def maximum_k_coverage(sets, k):
    """Greedy maximum-k-coverage: pick k sets maximising covered elements.

    Fix: replaced Python-2-only `xrange` with `range`.

    Args:
        sets: list of sets to choose from.
        k: number of sets to select; if k >= len(sets), all sets are returned.

    Returns:
        List of the k selected sets, in selection order.
    """
    covered = set()
    selected_sets = []
    if k >= len(sets):
        return sets
    for _ in range(k):
        # max() picks the first set adding the most still-uncovered elements.
        max_set = max(sets, key=lambda s: len(s - covered))
        selected_sets.append(max_set)
        covered |= max_set
    return selected_sets
def argmax_k_coverage(sets, k):
    """return the indices of the selected sets

    Greedy maximum-k-coverage over set indices.

    Fixes: `xrange` is Python-2-only, and `np.argmax` over a Python-3 `map`
    iterator fails. Plain `max` over indices is equivalent: like np.argmax,
    it returns the first index attaining the maximum.
    """
    covered = set()
    selected_set_indices = []
    if k >= len(sets):
        return range(len(sets))
    for _ in range(k):
        best = max(range(len(sets)), key=lambda i: len(sets[i] - covered))
        selected_set_indices.append(best)
        covered |= sets[best]
    return selected_set_indices
def k_best_trees(cand_trees, K):
    """Pick the K candidate trees whose node sets greedily cover most nodes."""
    node_sets = [set(tree.nodes()) for tree in cand_trees]
    chosen_indices = argmax_k_coverage(node_sets, K)
    return [cand_trees[index] for index in chosen_indices]
| {
"repo_name": "xiaohan2012/lst",
"path": "max_cover.py",
"copies": "1",
"size": "1054",
"license": "mit",
"hash": -5074639635083706000,
"line_mean": 26.7368421053,
"line_max": 63,
"alpha_frac": 0.6110056926,
"autogenerated": false,
"ratio": 3.411003236245955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4522008928845955,
"avg_score": null,
"num_lines": null
} |
"""Algorithm for simulating a 2048 game using Monte-Carlo method."""
import random, _2048
# Number of random playouts used to score each candidate move.
SIMULATE_TIMES = 100000
# Legal move names understood by the _2048 game engine.
DIRECTIONS = ('UP', 'DOWN', 'LEFT', 'RIGHT')
def simulate_to_end(game):
    """Play uniformly random moves until the game ends; return the final score.

    Fix: the original tried only 3 of the 4 directions per turn; if the sole
    legal move was the untried one, the game never advanced and the outer
    loop spun forever.
    """
    while game.get_state():
        dircts = list(DIRECTIONS)
        while dircts:
            c = random.choice(dircts)
            if game.move(c):
                break
            dircts.remove(c)
        else:
            # No direction moved at all: stop instead of looping forever.
            break
    return game.get_score()
def score_sum(game, direction):
    """Total score over SIMULATE_TIMES random playouts after playing `direction`.

    Fix: the original ran every playout on the SAME clone, which was already
    finished after the first simulation — so all later iterations just
    returned the identical final score. Clone fresh for each playout so the
    simulations are independent.
    """
    base = game.clone()
    base.move(direction)
    score = 0
    for _ in range(SIMULATE_TIMES):
        score += simulate_to_end(base.clone())
    return score
def monte_carlo(game):
scores = {}
biggest = 0
best = None
directions = list(DIRECTIONS)
for d in DIRECTIONS:
test = game.clone()
if not test.move(d):
directions.remove(d)
for direction in directions:
temp = game.clone()
score = score_sum(temp, direction)
if score > biggest:
biggest = score
best = direction
scores[direction] = score
print scores
if len(set(scores)) == 1:
return False
else:
return best
# Demo entry point: run one Monte-Carlo move selection on a fresh game.
if __name__ == '__main__':
    a_game = _2048.Gameplay()
    print monte_carlo(a_game)
"repo_name": "WillSkywalker/2048_monte_carlo",
"path": "monte_carlo.py",
"copies": "1",
"size": "1244",
"license": "apache-2.0",
"hash": -7812411360253709000,
"line_mean": 22.9423076923,
"line_max": 68,
"alpha_frac": 0.5699356913,
"autogenerated": false,
"ratio": 3.6374269005847952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9678527538429995,
"avg_score": 0.005767010690960007,
"num_lines": 52
} |
# WaveBlocks simulation configuration: 1D quartic potential, Fourier
# propagation with the "chinchen" splitting propagator.
algorithm = "fourier"
propagator = "chinchen"
# End time and timestep of the propagation.
T = 2 * 4.4
dt = 0.05
dimension = 1
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.1
potential = "quartic"
sigma = 4.0
# The grid of our simulation domain
limits = [(-6.283185307179586, 6.283185307179586)]
number_nodes = [8192]
# The parameter set of the initial wavepacket
Q = [[1.0 ]]
P = [[1.0j]]
q = [[0.0 ]]
p = [[1.0 ]]
S = [[0.0 ]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 1,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 7,
        "dimension": 1
    }],
    # Ground state only: single unit coefficient on the (0,) basis index.
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 14, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
# How often do we write data to disk
write_nth = 1
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/quartic/quartic_1D_f_cc.py",
"copies": "1",
"size": "1142",
"license": "bsd-3-clause",
"hash": -6076608341322623000,
"line_mean": 19.3928571429,
"line_max": 85,
"alpha_frac": 0.5323992995,
"autogenerated": false,
"ratio": 2.9739583333333335,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.40063576328333333,
"avg_score": null,
"num_lines": null
} |
# WaveBlocks simulation configuration: 2D Henon-Heiles potential,
# Fourier propagation.
algorithm = 'fourier'
propagator = 'fourier'
# End time and timestep.
T = 6
dt = 0.01
dimension = 2
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.05
potential = 'henon_heiles'
a = 1
b = 3
# Simulation domain and grid resolution per dimension.
limits = [[-1.5707963267948966, 1.5707963267948966],
          [-1.5707963267948966, 1.5707963267948966]]
number_nodes = [2**10, 2**10]
# The parameter set of the initial wavepacket
Q = [[1.0, 0.0],
     [0.0, 1.0]]
P = [[1.0j, 0.0 ],
     [0.0, 1.0j]]
q = [[0.06],
     [0.0]]
p = [[-0.01],
     [ 0.01]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    'type': 'HagedornWavepacket',
    'dimension': 2,
    'ncomponents': 1,
    'eps': eps,
    'Pi': [q, p, Q, P, S],
    'basis_shapes': [{
        'type': 'HyperCubicShape',
        'limits': [10, 10],
        'dimension': 2
    }],
    # Ground state only: single unit coefficient on the (0, 0) basis index.
    'coefficients': [[((0, 0), 1.0)]],
    'innerproduct': {
        'type': 'HomogeneousInnerProduct',
        'delegate': {
            'type': 'DirectHomogeneousQuadrature',
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 2,
                'qr_rules': [{'dimension': 1, 'order': 15, 'type': 'GaussHermiteQR'},
                             {'dimension': 1, 'order': 15, 'type': 'GaussHermiteQR'}],
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 5
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/henon_heiles/henon2_f.py",
"copies": "1",
"size": "1365",
"license": "bsd-3-clause",
"hash": -2106431423564026000,
"line_mean": 19.0735294118,
"line_max": 86,
"alpha_frac": 0.5091575092,
"autogenerated": false,
"ratio": 2.802874743326489,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8801210023833382,
"avg_score": 0.0021644457386212827,
"num_lines": 68
} |
# WaveBlocks simulation configuration: 1D Eckart barrier tunneling,
# Fourier propagation, ground-state (phi0) initial packet.
algorithm = "fourier"
propagator = "fourier"
T = 70
dt = 0.005
dimension = 1
ncomponents = 1
# Note: the eps in the paper is our eps**2
eps = 0.1530417681822
potential = "eckart"
sigma = 100 * 3.8008 * 10**(-4.0)
a = 1.0 / (2.0 * 0.52918)
# The grid of our simulation domain
limits = [[-9 * 3.141592653589793, 9 * 3.141592653589793]]
number_nodes = [2**12]
# The parameter set of the initial wavepacket
Q = [[ 3.5355339059327 ]]
P = [[ 0.2828427124746j]]
q = [[-7.5589045088306 ]]
p = [[ 0.2478854736792 ]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": dimension,
    "ncomponents": ncomponents,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 512,
        "dimension": 1
    }],
    # phi0: unit coefficient on the (0,) basis index.
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': dimension, 'order': 516, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 20
# Use Arnoldi iteration for the matrix exponential.
matrix_exponential = "arnoldi"
arnoldi_steps = 15
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/tunneling_eckart/eckart_phi0_f.py",
"copies": "1",
"size": "1403",
"license": "bsd-3-clause",
"hash": 5871295283274520000,
"line_mean": 21.2698412698,
"line_max": 94,
"alpha_frac": 0.5716322167,
"autogenerated": false,
"ratio": 2.941299790356394,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4012932007056394,
"avg_score": null,
"num_lines": null
} |
# WaveBlocks simulation configuration: 1D single avoided crossing
# (delta_gap potential, 2 components), semiclassical Hagedorn propagation.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 10
dt = 0.01
dimension = 1
ncomponents = 2
# Semiclassical scaling parameter; the gap size delta is tied to it.
eps = 0.2
delta = eps
potential = "delta_gap"
leading_component = 0
# The parameter set of the initial wavepacket
Q = [[1.0 - 5.0j]]
P = [[ 1.0j]]
q = [[-5.0]]
p = [[ 1.0]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": dimension,
    "ncomponents": ncomponents,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    # One basis shape per component.
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 64,
        "dimension": 1
    }, {
        "type": "HyperbolicCutShape",
        "K": 64,
        "dimension": 1
    }],
    # All initial population on the first component.
    "coefficients": [[((0,), 1.0)],
                     [((0,), 0.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 68, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
# How often do we write data to disk
write_nth = 2
matrix_exponential = "pade"
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/single_avoided_crossing/single_crossing_1D_p.py",
"copies": "1",
"size": "1264",
"license": "bsd-3-clause",
"hash": -8892399681891240000,
"line_mean": 19.3870967742,
"line_max": 85,
"alpha_frac": 0.5134493671,
"autogenerated": false,
"ratio": 3.144278606965174,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9146342283512982,
"avg_score": 0.002277138110438541,
"num_lines": 62
} |
# WaveBlocks simulation configuration: 1D cosh oscillator, semiclassical
# Hagedorn propagation with NSD (numerical steepest descent) quadrature.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 12
dt = 0.001
dimension = 1
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.01
potential = "cosh_osc"
# The parameter set of the initial wavepacket
Q = [[1.0]]
P = [[1.0j]]
q = [[1.0]]
p = [[0.0]]
S = [[0.0]]
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 1,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 10,
        "dimension": 1
    }],
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "NSDInhomogeneous",
            'qr': {
                'type': 'GaussLaguerreQR',
                'order': 5,
                'a': -0.5
            }
        }
    }
}
initvals = [wp0]
leading_component = 0
write_nth = 5
matrix_exponential = "pade"
# Extra observable: autocorrelation, evaluated with the same NSD quadrature.
observables = {
    "autocorrelation": {
        "innerproduct": {
            "type": "InhomogeneousInnerProduct",
            "delegate": {
                "type": "NSDInhomogeneous",
                'qr': {
                    'type': 'GaussLaguerreQR',
                    'order': 5,
                    'a': -0.5
                }
            }
        }
    }
}
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/cosh_oscillators/cosh_1D_p_nsd.py",
"copies": "1",
"size": "1276",
"license": "bsd-3-clause",
"hash": 2359656071280431600,
"line_mean": 17.2285714286,
"line_max": 48,
"alpha_frac": 0.4420062696,
"autogenerated": false,
"ratio": 3.174129353233831,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9116135622833831,
"avg_score": 0,
"num_lines": 70
} |
# WaveBlocks simulation configuration: 1D harmonic oscillator,
# semiclassical Hagedorn propagation.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 12
dt = 0.01
dimension = 1
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.1
potential = "quadratic"
# The parameter set of the initial wavepacket
Q = [[1.0]]
P = [[1.0j]]
q = [[1.0]]
p = [[0.0]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 1,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 10,
        "dimension": 1
    }],
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 14, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 5
matrix_exponential = "pade"
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/harmonic_oscillators/harmonic_1D_p.py",
"copies": "1",
"size": "1096",
"license": "bsd-3-clause",
"hash": 3774122229013654500,
"line_mean": 18.5714285714,
"line_max": 85,
"alpha_frac": 0.5319343066,
"autogenerated": false,
"ratio": 3.070028011204482,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9100924111824416,
"avg_score": 0.00020764119601328904,
"num_lines": 56
} |
# WaveBlocks simulation configuration: 1D harmonic oscillator started in a
# (numerically computed) stationary ground state.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 12
dt = 0.01
dimension = 1
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.24
potential = "quadratic"
sigma = 0.5
# The parameter set of the initial wavepacket
# Parameter values computed by 'ComputeGroundstate.py'
Q = [[1.18920712 + 0.0j]]
P = [[0.0 + 0.84089642j]]
q = [[0.0]]
p = [[0.0]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 1,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 10,
        "dimension": 1
    }],
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 14, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 5
matrix_exponential = "pade"
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/harmonic_oscillators/harmonic_1D_p_stationary_groundstate.py",
"copies": "1",
"size": "1191",
"license": "bsd-3-clause",
"hash": 2341553007688635400,
"line_mean": 19.5344827586,
"line_max": 85,
"alpha_frac": 0.5499580185,
"autogenerated": false,
"ratio": 3.0538461538461537,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4103804172346154,
"avg_score": null,
"num_lines": null
} |
# WaveBlocks simulation configuration: 2D harmonic oscillator started in a
# (numerically computed) stationary ground state.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 12
dt = 0.01
dimension = 2
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 1.0
potential = "quadratic_2d"
# The parameter set of the initial wavepacket
# Parameter values computed by 'ComputeGroundstate.py'
Q = [[1.18920712, 0.00000000],
     [0.00000000, 1.18920712]]
P = [[0.84089642j, 0.j ],
     [0.j, 0.84089642j]]
q = [[ 9.90555981e-14],
     [-4.51334304e-13]]
p = [[0.0],
     [0.0]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 2,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 4,
        "dimension": 2
    }],
    "coefficients": [[((0, 0), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 2,
                'qr_rules': [{'dimension': 1, 'order': 8, 'type': 'GaussHermiteQR'},
                             {'dimension': 1, 'order': 8, 'type': 'GaussHermiteQR'}],
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 5
matrix_exponential = "pade"
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/harmonic_oscillators/harmonic_2D_p_stationary_groundstate.py",
"copies": "1",
"size": "1395",
"license": "bsd-3-clause",
"hash": 1173467887595219700,
"line_mean": 20.1363636364,
"line_max": 85,
"alpha_frac": 0.5318996416,
"autogenerated": false,
"ratio": 3.0064655172413794,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.40383651588413794,
"avg_score": null,
"num_lines": null
} |
# WaveBlocks simulation configuration: 1D double well given as a symbolic
# potential expression, semiclassical Hagedorn propagation.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 2 * 4.4
dt = 0.0005
dimension = 1
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.1
# Potential given symbolically instead of by a named preset.
potential = {}
potential["variables"] = ["x"]
potential["potential"] = "x**4 - x**2"
# The parameter set of the initial wavepacket
Q = [[1.0]]
P = [[1.0j]]
q = [[1.0]]
p = [[0.0]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 1,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 32,
        "dimension": 1
    }],
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 36, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 10
matrix_exponential = "pade"
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/double_well/double_well_1D_p.py",
"copies": "1",
"size": "1165",
"license": "bsd-3-clause",
"hash": 267748695122920640,
"line_mean": 19.0862068966,
"line_max": 85,
"alpha_frac": 0.5313304721,
"autogenerated": false,
"ratio": 3.049738219895288,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9080066286221431,
"avg_score": 0.00020048115477145148,
"num_lines": 58
} |
# WaveBlocks simulation configuration: 1D Eckart barrier tunneling,
# semiclassical Hagedorn propagation, second excited (phi2) initial packet.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 70
dt = 0.005
dimension = 1
ncomponents = 1
# Note: the eps in the paper is our eps**2
eps = 0.1530417681822
potential = "eckart"
sigma = 100 * 3.8008 * 10**(-4.0)
a = 1.0 / (2.0 * 0.52918)
# The parameter set of the initial wavepacket
Q = [[ 3.5355339059327 ]]
P = [[ 0.2828427124746j]]
q = [[-7.5589045088306 ]]
p = [[ 0.2478854736792 ]]
S = [[0.0]]
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": dimension,
    "ncomponents": ncomponents,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 512,
        "dimension": 1
    }],
    # phi2: unit coefficient on the (2,) basis index.
    "coefficients": [[((2,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': dimension, 'order': 516, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 20
# Use Arnoldi iteration for the matrix exponential.
matrix_exponential = "arnoldi"
arnoldi_steps = 15
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/tunneling_eckart/eckart_phi2_p.py",
"copies": "1",
"size": "1315",
"license": "bsd-3-clause",
"hash": 1416442261457415000,
"line_mean": 20.9166666667,
"line_max": 94,
"alpha_frac": 0.5657794677,
"autogenerated": false,
"ratio": 2.975113122171946,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.40408925898719456,
"avg_score": null,
"num_lines": null
} |
# WaveBlocks 1D simulation TEMPLATE.
# NOTE(review): the blank assignments (T, dt, eps, potential,
# leading_component) are deliberate placeholders — this file is NOT valid
# Python until they are filled in.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T =
dt =
dimension = 1
ncomponents = 1
eps =
potential =
# The parameter set of the initial wavepacket
Q = [[1.0]]
P = [[1.0j]]
q = [[1.0]]
p = [[0.0]]
S = [[0.0]]
leading_component =
# How often do we write data to disk
write_nth = 1
matrix_exponential = "pade"
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 1,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 10,
        "dimension": 1
    }],
    "coefficients": [[((0,), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 14, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/templates/template_1D_p.py",
"copies": "1",
"size": "1070",
"license": "bsd-3-clause",
"hash": -467067753189527230,
"line_mean": 18.1071428571,
"line_max": 85,
"alpha_frac": 0.5289719626,
"autogenerated": false,
"ratio": 3.101449275362319,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9129383031982252,
"avg_score": 0.00020764119601328904,
"num_lines": 56
} |
# WaveBlocks 2D simulation TEMPLATE.
# NOTE(review): the blank assignments (T, dt, eps, potential) are deliberate
# placeholders — this file is NOT valid Python until they are filled in.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T =
dt =
dimension = 2
ncomponents = 1
eps =
potential =
# The parameter set of the initial wavepacket
Q = [[1.0, 0.0],
     [0.0, 1.0]]
P = [[1.0j, 0.0 ],
     [0.0, 1.0j]]
q = [[-1.0],
     [ 0.0]]
p = [[0.0],
     [0.0]]
S = [[0.0]]
leading_component = 0
# How often do we write data to disk
write_nth = 1
matrix_exponential = "pade"
# What it takes to specify a wavepacket!
wp0 = {
    "type": "HagedornWavepacket",
    "dimension": 2,
    "ncomponents": 1,
    "eps": eps,
    "Pi": [q, p, Q, P, S],
    "basis_shapes": [{
        "type": "HyperbolicCutShape",
        "K": 8,
        "dimension": 2
    }],
    "coefficients": [[((0, 0), 1.0)]],
    "innerproduct": {
        "type": "HomogeneousInnerProduct",
        "delegate": {
            "type": "DirectHomogeneousQuadrature",
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 2,
                'qr_rules': [{'dimension': 1, 'order': 24, 'type': 'GaussHermiteQR'},
                             {'dimension': 1, 'order': 24, 'type': 'GaussHermiteQR'}],
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/templates/template_2D_p.py",
"copies": "1",
"size": "1237",
"license": "bsd-3-clause",
"hash": -8330119545310793000,
"line_mean": 18.0307692308,
"line_max": 86,
"alpha_frac": 0.5004042037,
"autogenerated": false,
"ratio": 2.9735576923076925,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8962217525567635,
"avg_score": 0.002348874088011581,
"num_lines": 65
} |
# WaveBlocks simulation configuration: 1D Morse potential (I2 molecule
# parameters), semiclassical Hagedorn propagation.
algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y61"
T = 15
dt = 0.01
dimension = 1
ncomponents = 1
# Semiclassical scaling parameter of the wavepacket.
eps = 0.1104536
potential = 'morse_zero'
# Morse parameters: well depth D, width a, equilibrium position x0.
D = 0.0572
a = 0.983
x0 = 5.03855
# The parameter set of the initial wavepacket
# Sigma = PQ^{-1} = 1.3836
Q = [[1.0]]
P = [[1.0j]]
q = [[4.53]]
p = [[0.0]]
S = [[0.0]]
wp0 = {
    'type': 'HagedornWavepacket',
    'dimension': 1,
    'ncomponents': 1,
    'eps': eps,
    'Pi': [q, p, Q, P, S],
    'basis_shapes': [{
        'type': 'HyperbolicCutShape',
        'K': 32,
        'dimension': 1
    }],
    'coefficients': [[((0,), 1.0)]],
    'innerproduct': {
        'type': 'HomogeneousInnerProduct',
        'delegate': {
            'type': 'DirectHomogeneousQuadrature',
            'qr': {
                'type': 'TensorProductQR',
                'dimension': 1,
                'qr_rules': [{'dimension': 1, 'order': 36, 'type': 'GaussHermiteQR'}]
            }
        }
    }
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 10
matrix_exponential = 'pade'
| {
"repo_name": "WaveBlocks/WaveBlocksND",
"path": "examples/morse_oscillators/I2_morse.py",
"copies": "1",
"size": "1126",
"license": "bsd-3-clause",
"hash": 2561484126955237000,
"line_mean": 18.0847457627,
"line_max": 85,
"alpha_frac": 0.526642984,
"autogenerated": false,
"ratio": 2.909560723514212,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3936203707514212,
"avg_score": null,
"num_lines": null
} |
'Algorithmia Algorithm API Client (python)'
import base64
import json
import re
from Algorithmia.async_response import AsyncResponse
from Algorithmia.algo_response import AlgoResponse
from Algorithmia.errors import ApiError, ApiInternalError, raiseAlgoApiError
from enum import Enum
from algorithmia_api_client.rest import ApiException
from algorithmia_api_client import CreateRequest, UpdateRequest, VersionRequest, Details, Settings, SettingsMandatory, SettingsPublish, \
CreateRequestVersionInfo, VersionInfo, VersionInfoPublish
# Result handling modes for algorithm calls: parsed response (default),
# raw bytes, or discarded output.
OutputType = Enum('OutputType','default raw void')
class Algorithm(object):
def __init__(self, client, algoRef):
    """Parse an algorithm reference and bind it to an API client.

    Accepts "algo://user/name[/version]", "/user/name[/version]" or
    "user/name[/version]".

    Raises:
        ValueError: if `algoRef` does not match the expected form.
    """
    algoRegex = re.compile(r"(?:algo://|/|)(\w+/.+)")
    m = algoRegex.match(algoRef)
    if m is None:
        raise ValueError('Invalid algorithm URI: ' + algoRef)
    self.client = client
    self.path = m.group(1)
    parts = self.path.split("/")
    self.username = parts[0]
    self.algoname = parts[1]
    # Fix: always define `version` (None when absent) so attribute access
    # never raises AttributeError for unversioned references.
    self.version = parts[2] if len(parts) > 2 else None
    self.url = '/v1/algo/' + self.path
    self.query_parameters = {}
    self.output_type = OutputType.default
def set_options(self, timeout=300, stdout=False, output=OutputType.default, **query_parameters):
    """Configure call options and extra query parameters; returns self for chaining.

    Extra keyword arguments override `timeout`/`stdout` entries, as before.
    """
    merged = {'timeout': timeout, 'stdout': stdout}
    merged.update(query_parameters)
    self.query_parameters = merged
    self.output_type = output
    return self
# Create a new algorithm
# Create a new algorithm
def create(self, details=None, settings=None, version_info=None):
    """Create this algorithm under the owning account.

    Args:
        details: dict of algorithm details, passed to `Details`.
        settings: dict of mandatory settings, passed to `SettingsMandatory`.
        version_info: dict for the initial version, passed to
            `CreateRequestVersionInfo`.

    Raises:
        The error produced by `raiseAlgoApiError` when the API rejects
        the request.
    """
    # Fix: mutable default arguments ({}) replaced by None sentinels;
    # empty dicts preserve the original behaviour.
    detailsObj = Details(**(details or {}))
    settingsObj = SettingsMandatory(**(settings or {}))
    createRequestVersionInfoObj = CreateRequestVersionInfo(**(version_info or {}))
    create_parameters = {"name": self.algoname, "details": detailsObj, "settings": settingsObj, "version_info": createRequestVersionInfoObj}
    create_request = CreateRequest(**create_parameters)
    try:
        # Create Algorithm
        api_response = self.client.manageApi.create_algorithm(self.username, create_request)
        return api_response
    except ApiException as e:
        error_message = json.loads(e.body)
        raise raiseAlgoApiError(error_message)
# Update the settings in an algorithm
# Update the settings in an algorithm
def update(self, details=None, settings=None, version_info=None):
    """Update this algorithm's details, settings and version info.

    Args:
        details: dict of algorithm details, passed to `Details`.
        settings: dict of settings, passed to `Settings`.
        version_info: dict passed to `CreateRequestVersionInfo`.

    Raises:
        The error produced by `raiseAlgoApiError` when the API rejects
        the request.
    """
    # Fix: mutable default arguments ({}) replaced by None sentinels;
    # empty dicts preserve the original behaviour.
    detailsObj = Details(**(details or {}))
    settingsObj = Settings(**(settings or {}))
    createRequestVersionInfoObj = CreateRequestVersionInfo(**(version_info or {}))
    update_parameters = {"details": detailsObj, "settings": settingsObj, "version_info": createRequestVersionInfoObj}
    update_request = UpdateRequest(**update_parameters)
    try:
        # Update Algorithm
        api_response = self.client.manageApi.update_algorithm(self.username, self.algoname, update_request)
        return api_response
    except ApiException as e:
        error_message = json.loads(e.body)
        raise raiseAlgoApiError(error_message)
# Publish an algorithm
def publish(self, details=None, settings=None, version_info=None):
    """Publish the current version of this algorithm.

    Defaults were previously mutable dicts ({}); None avoids the
    shared-mutable-default pitfall (behavior is unchanged).

    :raises: AlgoApiError (via raiseAlgoApiError) on API failure
    """
    detailsObj = Details(**(details or {}))
    settingsObj = SettingsPublish(**(settings or {}))
    versionRequestObj = VersionInfoPublish(**(version_info or {}))
    publish_parameters = {"details": detailsObj, "settings": settingsObj,
                          "version_info": versionRequestObj}
    version_request = VersionRequest(**publish_parameters)  # VersionRequest | Publish Version Request
    try:
        # Publish Algorithm
        return self.client.manageApi.publish_algorithm(self.username, self.algoname, version_request)
    except ApiException as e:
        error_message = json.loads(e.body)
        raise raiseAlgoApiError(error_message)
def builds(self, limit=56, marker=None):
    """List builds for this algorithm, optionally resuming at `marker`."""
    kwargs = {'limit': limit}
    if marker is not None:
        # Only forward the pagination marker when the caller supplied one.
        kwargs['marker'] = marker
    try:
        return self.client.manageApi.get_algorithm_builds(self.username, self.algoname, **kwargs)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
def get_build(self, build_id):
    """Return the build object for `build_id`.

    The build status can be one of: succeeded, failed, in-progress.
    """
    try:
        return self.client.manageApi.get_algorithm_build_by_id(
            self.username, self.algoname, build_id)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
def get_build_logs(self, build_id):
    """Return the build logs for the given `build_id`."""
    try:
        return self.client.manageApi.get_algorithm_build_logs(
            self.username, self.algoname, build_id)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
def build_logs(self):
    """Fetch the raw builds listing via the HTTP helper and decode it as JSON."""
    endpoint = '/v1/algorithms/' + self.username + '/' + self.algoname + '/builds'
    raw = self.client.getHelper(endpoint).content
    return json.loads(raw.decode('utf-8'))
def get_scm_status(self):
    """Return the SCM (source control) connection status for this algorithm."""
    try:
        return self.client.manageApi.get_algorithm_scm_connection_status(
            self.username, self.algoname)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
# Get info on an algorithm
def info(self, algo_hash=None):
    """Return algorithm metadata; a specific version when `algo_hash` is given."""
    try:
        if algo_hash:
            return self.client.manageApi.get_algorithm_hash_version(
                self.username, self.algoname, algo_hash)
        return self.client.manageApi.get_algorithm(self.username, self.algoname)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
# Get all versions of the algorithm, with the given filters
def versions(self, limit=None, marker=None, published=None, callable=None):
    """List algorithm versions, filtered by the given optional parameters.

    NOTE: `callable` shadows the builtin of the same name, but it is part of
    the public keyword interface and so is kept.
    """
    def _api_bool(value):
        # The API expects lowercase string booleans ("true"/"false");
        # anything else passes through untouched.
        return str(value).lower() if str(value) in ("True", "False") else value

    kwargs = {}
    if limit:
        kwargs["limit"] = limit
    if marker:
        kwargs["marker"] = marker
    if published:
        kwargs["published"] = _api_bool(published)
    if callable:
        kwargs["callable"] = _api_bool(callable)
    try:
        # Get Algorithm versions
        return self.client.manageApi.get_algorithm_versions(
            self.username, self.algoname, **kwargs)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
# Compile an algorithm
def compile(self):
    """Trigger a server-side compile of this algorithm."""
    try:
        return self.client.manageApi.algorithms_username_algoname_compile_post(
            self.username, self.algoname)
    except ApiException as e:
        raise raiseAlgoApiError(json.loads(e.body))
# Pipe an input into this algorithm
def pipe(self, input1):
    """Invoke the algorithm with `input1`, dispatching on the configured output mode."""
    if self.output_type == OutputType.raw:
        return self._postRawOutput(input1)
    if self.output_type == OutputType.void:
        return self._postVoidOutput(input1)
    # Default: parse the JSON response into an AlgoResponse
    response = self.client.postJsonHelper(self.url, input1, **self.query_parameters)
    return AlgoResponse.create_algo_response(response)
def _postRawOutput(self, input1):
    """POST the input requesting raw (non-JSON) output; return the response text."""
    # Don't parse response as json
    self.query_parameters['output'] = 'raw'
    response = self.client.postJsonHelper(
        self.url, input1, parse_response_as_json=False, **self.query_parameters)
    # Map HTTP error codes to exceptions; other codes fall through to text.
    if response.status_code == 400:
        raise ApiError(response.text)
    if response.status_code == 500:
        raise ApiInternalError(response.text)
    return response.text
def _postVoidOutput(self, input1):
    """POST the input requesting 'void' output; return an AsyncResponse handle."""
    self.query_parameters['output'] = 'void'
    body = self.client.postJsonHelper(self.url, input1, **self.query_parameters)
    if 'error' not in body:
        return AsyncResponse(body)
    raise ApiError(body['error']['message'])
| {
"repo_name": "algorithmiaio/algorithmia-python",
"path": "Algorithmia/algorithm.py",
"copies": "1",
"size": "8975",
"license": "mit",
"hash": 3424146257052005000,
"line_mean": 42.9950980392,
"line_max": 144,
"alpha_frac": 0.6338718663,
"autogenerated": false,
"ratio": 4.2096622889305815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5343534155230582,
"avg_score": null,
"num_lines": null
} |
'Algorithmia API Client (python)'
from Algorithmia.client import Client
from Algorithmia.handler import Handler
import sys

# The ADK is only importable on Python 3.
if sys.version_info[0] >= 3:
    from adk import ADK
import os

# Module-level defaults; users may assign Algorithmia.apiKey / Algorithmia.apiAddress.
# Fix: `import sys` appeared twice; the duplicate was removed.
apiKey = None
apiAddress = None
# Get reference to an algorithm using a default client
def algo(algoRef):
    """Return an algorithm handle resolved through the shared default client."""
    default_client = getDefaultClient()
    return default_client.algo(algoRef)
def file(dataUrl):
    """Return a data-file handle for `dataUrl` via the shared default client."""
    default_client = getDefaultClient()
    return default_client.file(dataUrl)
def dir(dataUrl):
    """Return a data-directory handle for `dataUrl` via the shared default client."""
    default_client = getDefaultClient()
    return default_client.dir(dataUrl)
def client(api_key=None, api_address=None, ca_cert=None):
    """Construct a standalone Client with the given credentials/endpoint."""
    new_client = Client(api_key, api_address, ca_cert)
    return new_client
def handler(apply_func, load_func=lambda: None):
    """Wrap `apply_func` (and an optional one-time `load_func`) in a Handler."""
    wrapped = Handler(apply_func, load_func)
    return wrapped
# The default client to use, assuming the user does not want to construct their own
defaultClient = None

# Used internally to get default client
def getDefaultClient():
    """Return the shared Client, rebuilding it when the module-level apiKey changed.

    Bug fix: the previous check used `is not` (identity), which can both miss
    a changed key and spuriously rebuild for equal-but-distinct string
    objects; compare by value with `!=` instead.
    """
    global defaultClient
    if defaultClient is None or defaultClient.apiKey != apiKey:
        # Construct default client
        defaultClient = Client(apiKey)
    return defaultClient
# Used internally to get default api client
def getApiAddress():
    """Resolve the API endpoint: explicit module setting, then the
    ALGORITHMIA_API environment variable, then the public default."""
    global apiAddress
    if apiAddress is not None:
        # User set Algorithmia.apiAddress = "..." explicitly
        return apiAddress
    # os.environ.get collapses the membership test and lookup of the original.
    return os.environ.get('ALGORITHMIA_API', "https://api.algorithmia.com")
| {
"repo_name": "algorithmiaio/algorithmia-python",
"path": "Algorithmia/__init__.py",
"copies": "1",
"size": "1609",
"license": "mit",
"hash": -5208113613661092000,
"line_mean": 28.2545454545,
"line_max": 83,
"alpha_frac": 0.7277812306,
"autogenerated": false,
"ratio": 3.830952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058733611552381,
"avg_score": null,
"num_lines": null
} |
'Algorithmia Data API Client (python)'
import json
import re
import os
import six
import tempfile
import Algorithmia
from Algorithmia.datafile import DataFile
from Algorithmia.data import DataObject, DataObjectType
from Algorithmia.errors import DataApiError
from Algorithmia.util import getParentAndBase, pathJoin
from Algorithmia.acl import Acl
class DataDirectory(DataObject):
    """Remote directory handle for the Algorithmia Data API."""

    def __init__(self, client, dataUrl):
        super(DataDirectory, self).__init__(DataObjectType.directory)
        self.client = client
        # Parse dataUrl: strip a leading data:// scheme or bare leading slash.
        self.path = re.sub(r'^data://|^/', '', dataUrl)
        self.url = DataDirectory._getUrl(self.path)

    @staticmethod
    def _getUrl(path):
        # Data API endpoint for a directory path.
        return '/v1/data/' + path

    def set_attributes(self, response_json):
        # Nothing to set for now
        pass

    def getName(self):
        """Return the last path component of this directory."""
        _, name = getParentAndBase(self.path)
        return name

    def exists(self):
        """Return True when the directory exists server-side.

        Heading a directory apparently isn't a valid operation, so GET is
        used instead of HEAD.
        """
        response = self.client.getHelper(self.url)
        return (response.status_code == 200)

    def create(self, acl=None):
        '''Creates a directory, optionally include Acl argument to set permissions'''
        parent, name = getParentAndBase(self.path)
        # Bug fix (shadowing): this local was previously named `json`,
        # hiding the imported json module inside this method.
        payload = {'name': name}
        if acl is not None:
            payload['acl'] = acl.to_api_param()
        response = self.client.postJsonHelper(DataDirectory._getUrl(parent), payload, False)
        if (response.status_code != 200):
            raise DataApiError("Directory creation failed: " + str(response.content))

    def delete(self, force=False):
        """Delete this directory; `force=True` also deletes non-empty directories."""
        url = self.url
        if force:
            url += '?force=true'
        result = self.client.deleteHelper(url)
        if 'error' in result:
            raise DataApiError(result['error']['message'])
        else:
            return True

    def file(self, name):
        """Return a DataFile handle for `name` inside this directory."""
        return DataFile(self.client, pathJoin(self.path, name))

    def files(self):
        """Iterate over the files directly inside this directory."""
        return self._get_directory_iterator(DataObjectType.file)

    def dir(self, name):
        """Return a DataDirectory handle for the child directory `name`."""
        return DataDirectory(self.client, pathJoin(self.path, name))

    def dirs(self):
        """Iterate over immediate subdirectories."""
        return self._get_directory_iterator(DataObjectType.directory)

    def list(self):
        """Iterate over all children (files and directories)."""
        return self._get_directory_iterator()

    def get_permissions(self):
        '''
        Returns permissions for this directory or None if it's a special collection such as
        .session or .algo
        '''
        response = self.client.getHelper(self.url, acl='true')
        if response.status_code != 200:
            raise DataApiError('Unable to get permissions:' + str(response.content))
        content = response.json()
        if 'acl' in content:
            return Acl.from_acl_response(content['acl'])
        else:
            return None

    def update_permissions(self, acl):
        """Replace this directory's ACL; returns True on success."""
        params = {'acl': acl.to_api_param()}
        response = self.client.patchHelper(self.url, params)
        if response.status_code != 200:
            raise DataApiError('Unable to update permissions: ' + response.json()['error']['message'])
        return True

    def _get_directory_iterator(self, type_filter=None):
        # Page through the listing using the API's `marker` continuation protocol.
        marker = None
        first = True
        while first or (marker is not None and len(marker) > 0):
            first = False
            url = self.url
            query_params = {}
            if marker:
                query_params['marker'] = marker
            response = self.client.getHelper(url, **query_params)
            if response.status_code != 200:
                raise DataApiError("Directory iteration failed: " + str(response.content))
            responseContent = response.content
            if isinstance(responseContent, six.binary_type):
                responseContent = responseContent.decode()
            content = json.loads(responseContent)
            if 'marker' in content:
                marker = content['marker']
            else:
                marker = None
            if type_filter is DataObjectType.directory or type_filter is None:
                for d in self._iterate_directories(content):
                    yield d
            if type_filter is DataObjectType.file or type_filter is None:
                for f in self._iterate_files(content):
                    yield f

    def _iterate_directories(self, content):
        # Build DataDirectory handles from one page of listing results.
        directories = []
        if 'folders' in content:
            for dir_info in content['folders']:
                d = DataDirectory(self.client, pathJoin(self.path, dir_info['name']))
                d.set_attributes(dir_info)
                directories.append(d)
        return directories

    def _iterate_files(self, content):
        # Build DataFile handles from one page of listing results.
        files = []
        if 'files' in content:
            for file_info in content['files']:
                f = DataFile(self.client, pathJoin(self.path, file_info['filename']))
                f.set_attributes(file_info)
                files.append(f)
        return files
class LocalDataDirectory():
    """Directory on the local filesystem, addressed via a file:// URL."""

    def __init__(self, client, dataUrl):
        # `client` is kept for interface parity with DataDirectory; unused locally.
        self.client = client
        # Parse dataUrl
        self.path = dataUrl.replace('file://', '')

    def set_attributes(self, response_json):
        raise NotImplementedError

    def getName(self):
        raise NotImplementedError

    def exists(self):
        """Return True when the directory exists locally."""
        return os.path.isdir(self.path)

    def create(self):
        """Create the directory (parent must already exist)."""
        os.mkdir(self.path)

    def delete(self, force=False):
        # NOTE: `force` is accepted for interface parity, but os.rmdir only
        # removes empty directories.
        os.rmdir(self.path)

    def file(self, name):
        """Return a LocalDataFile handle for `name` inside this directory.

        Bug fix: LocalDataFile lives in Algorithmia.datafile and was never
        imported by this module, so this method previously raised NameError.
        """
        from Algorithmia.datafile import LocalDataFile
        return LocalDataFile(self.client, 'file://' + pathJoin(self.path, name))

    def dir(self, name):
        raise NotImplementedError

    def list(self):
        """Yield the names of all entries in this directory."""
        for entry in os.listdir(self.path):
            yield entry

    def dirs(self, content=None):
        """Yield names of subdirectories. `content` is unused; it was a
        required positional argument and is now optional (backward-compatible)."""
        for entry in os.listdir(self.path):
            if os.path.isdir(self.path + '/' + entry):
                yield entry

    def files(self, content=None):
        """Yield names of plain files. `content` is unused; it was a required
        positional argument and is now optional (backward-compatible)."""
        for entry in os.listdir(self.path):
            if os.path.isfile(self.path + '/' + entry):
                yield entry
| {
"repo_name": "algorithmiaio/algorithmia-python",
"path": "Algorithmia/datadirectory.py",
"copies": "1",
"size": "6003",
"license": "mit",
"hash": -2204779883851168800,
"line_mean": 31.625,
"line_max": 102,
"alpha_frac": 0.6006996502,
"autogenerated": false,
"ratio": 4.165857043719639,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017401401848182252,
"num_lines": 184
} |
'Algorithmia Data API Client (python)'
import re
import json
import six
import tempfile
from datetime import datetime
import os.path
import pkgutil
from Algorithmia.util import getParentAndBase
from Algorithmia.data import DataObject, DataObjectType
from Algorithmia.errors import DataApiError, raiseDataApiError
class DataFile(DataObject):
    """Remote file handle for the Algorithmia Data API."""

    def __init__(self, client, dataUrl):
        super(DataFile, self).__init__(DataObjectType.file)
        self.client = client
        # Parse dataUrl: strip a leading data:// scheme or bare leading slash.
        self.path = re.sub(r'^data://|^/', '', dataUrl)
        self.url = '/v1/data/' + self.path
        self.last_modified = None
        self.size = None

    def set_attributes(self, attributes):
        """Populate last_modified/size from a listing entry."""
        self.last_modified = datetime.strptime(attributes['last_modified'], '%Y-%m-%dT%H:%M:%S.%fZ')
        self.size = attributes['size']

    def _raiseIfMissing(self):
        # Shared guard for all getters: fail fast with the API's error text.
        # (Previously this check was duplicated in every get* method.)
        exists, error = self.existsWithError()
        if not exists:
            raise DataApiError('unable to get file {} - {}'.format(self.path, error))

    # Deprecated:
    def get(self):
        return self.client.getHelper(self.url)

    # Get file from the data api
    def getFile(self):
        """Download the file to a local temp file and return an open handle to it."""
        self._raiseIfMissing()
        # Make HTTP get request
        response = self.client.getHelper(self.url)
        with tempfile.NamedTemporaryFile(delete=False) as f:
            for block in response.iter_content(1024):
                if not block:
                    break
                f.write(block)
            f.flush()
        return open(f.name)

    def getName(self):
        """Return the last path component of this file."""
        _, name = getParentAndBase(self.path)
        return name

    def getBytes(self):
        """Return the file contents as bytes."""
        self._raiseIfMissing()
        # Make HTTP get request
        return self.client.getHelper(self.url).content

    def getString(self):
        """Return the file contents as text."""
        self._raiseIfMissing()
        # Make HTTP get request
        return self.client.getHelper(self.url).text

    def getJson(self):
        """Return the file contents parsed as JSON."""
        self._raiseIfMissing()
        # Make HTTP get request
        return self.client.getHelper(self.url).json()

    def getNumpy(self):
        """Return the file's JSON contents as a numpy array (requires numpy)."""
        self._raiseIfMissing()
        np_loader = pkgutil.find_loader('numpy')
        if np_loader is not None:
            import numpy as np
            payload = self.client.getHelper(self.url).json()
            return np.array(payload)
        else:
            raise DataApiError("Attempted to .getNumpy() file without numpy available, please install numpy.")

    def exists(self):
        # In order to not break backward compatability keeping this method to only return
        # a boolean
        exists, error = self.existsWithError()
        return exists

    def existsWithError(self):
        """Return (exists, error_text) via a HEAD request."""
        response = self.client.headHelper(self.url)
        if 'X-Error-Message' in response.headers:
            error = response.headers['X-Error-Message']
        else:
            error = response.text
        return (response.status_code == 200, error)

    def put(self, data):
        """Upload `data` (str or bytes); str is encoded first."""
        # First turn the data to bytes if we can
        if isinstance(data, six.string_types) and not isinstance(data, six.binary_type):
            data = bytes(data.encode())
        if isinstance(data, six.binary_type):
            result = self.client.putHelper(self.url, data)
            if 'error' in result:
                raise raiseDataApiError(result)
            else:
                return self
        else:
            raise TypeError("Must put strings or binary data. Use putJson instead")

    def putJson(self, data):
        """Serialize `data` to JSON and upload it."""
        jsonElement = json.dumps(data)
        result = self.client.putHelper(self.url, jsonElement)
        if 'error' in result:
            raise raiseDataApiError(result)
        else:
            return self

    def putFile(self, path):
        """Upload the local file at `path`."""
        with open(path, 'rb') as f:
            result = self.client.putHelper(self.url, f)
            if 'error' in result:
                raise raiseDataApiError(result)
            else:
                return self

    def putNumpy(self, array):
        """Upload a numpy array as a JSON payload (requires numpy)."""
        np_loader = pkgutil.find_loader('numpy')
        if np_loader is not None:
            import numpy as np
            encoded_array = array.tolist()
            self.putJson(encoded_array)
            return self
        else:
            raise DataApiError("Attempted to .putNumpy() a file without numpy available, please install numpy.")

    def delete(self):
        """Delete this file; returns True on success."""
        result = self.client.deleteHelper(self.url)
        if 'error' in result:
            raise raiseDataApiError(result)
        else:
            return True
class LocalDataFile():
    """File on the local filesystem, addressed via a file:// URL."""

    def __init__(self, client, filePath):
        # `client` is kept for interface parity with DataFile; unused locally.
        self.client = client
        # Parse dataUrl
        self.path = filePath.replace('file://', '')
        self.url = '/v1/data/' + self.path
        self.last_modified = None
        self.size = None

    def set_attributes(self, attributes):
        """Populate last_modified/size from a listing entry."""
        self.last_modified = datetime.strptime(attributes['last_modified'], '%Y-%m-%dT%H:%M:%S.%fZ')
        self.size = attributes['size']

    # Get file from the data api
    def getFile(self):
        """Return an open text-mode handle to the local file."""
        exists, error = self.existsWithError()
        if not exists:
            raise DataApiError('unable to get file {} - {}'.format(self.path, error))
        return open(self.path)

    def getName(self):
        """Return the last path component of this file."""
        _, name = getParentAndBase(self.path)
        return name

    def getBytes(self):
        """Return the file contents as bytes."""
        exists, error = self.existsWithError()
        if not exists:
            raise DataApiError('unable to get file {} - {}'.format(self.path, error))
        with open(self.path, 'rb') as f:
            return f.read()

    def getString(self):
        """Return the file contents as text."""
        exists, error = self.existsWithError()
        if not exists:
            raise DataApiError('unable to get file {} - {}'.format(self.path, error))
        with open(self.path, 'r') as f:
            return f.read()

    def getJson(self):
        """Return the file contents parsed as JSON."""
        exists, error = self.existsWithError()
        if not exists:
            raise DataApiError('unable to get file {} - {}'.format(self.path, error))
        with open(self.path, 'r') as f:
            return json.loads(f.read())

    def exists(self):
        return self.existsWithError()[0]

    def existsWithError(self):
        # Local files carry no API error text; second element is always ''.
        return os.path.isfile(self.path), ''

    def put(self, data):
        """Write `data` (str or bytes) to the local file."""
        # First turn the data to bytes if we can
        if isinstance(data, six.string_types) and not isinstance(data, six.binary_type):
            data = bytes(data.encode())
        with open(self.path, 'wb') as f:
            f.write(data)
        return self

    def putJson(self, data):
        """Serialize `data` to JSON and write it to the local file."""
        jsonElement = json.dumps(data)
        result = localPutHelper(self.path, jsonElement)
        if 'error' in result:
            raise raiseDataApiError(result)
        else:
            return self

    def putFile(self, path):
        """Copy the file at `path` to this file's location.

        Bug fix: the source and destination were previously passed to
        localPutHelper reversed, which truncated the *source* file and tried
        to write the destination *path string* into it.
        """
        with open(path, 'rb') as src:
            result = localPutHelper(self.path, src.read())
        if 'error' in result:
            raise raiseDataApiError(result)
        return self

    def delete(self):
        """Remove the local file; returns True on success."""
        try:
            os.remove(self.path)
            return True
        except:
            raise DataApiError('Failed to delete local file ' + self.path)
def localPutHelper(path, contents):
    """Write `contents` to `path`, returning {'status': 'success'} or {'error': ...}.

    Bug fix: the file is opened in binary mode, but callers (e.g. putJson)
    pass str; on Python 3 that raised TypeError and every such write failed.
    str is now encoded as UTF-8 before writing.
    """
    try:
        if isinstance(contents, str):
            contents = contents.encode('utf-8')
        with open(path, 'wb') as f:
            f.write(contents)
        return dict(status='success')
    except Exception as e:
        return dict(error=str(e))
| {
"repo_name": "algorithmiaio/algorithmia-python",
"path": "Algorithmia/datafile.py",
"copies": "1",
"size": "7937",
"license": "mit",
"hash": -8500454176323640000,
"line_mean": 32.4894514768,
"line_max": 112,
"alpha_frac": 0.5911553484,
"autogenerated": false,
"ratio": 4.068170169144029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5159325517544029,
"avg_score": null,
"num_lines": null
} |
'''Algorithmically determine soft selector strings.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
Soft selectors
==============
.. autofunction:: find_soft_selectors
.. autofunction:: make_ngram_corpus
'''
from __future__ import absolute_import, division, print_function
import argparse
from collections import defaultdict
import logging
from operator import itemgetter
import string
import dblogger
from gensim import corpora
import many_stop_words
from nltk.tokenize import RegexpTokenizer
from nltk.util import ngrams
import regex as re
import streamcorpus
from streamcorpus_pipeline._clean_html import clean_html
from streamcorpus_pipeline._clean_visible import clean_visible
import yakonfig
logger = logging.getLogger(__name__)
stop_words = many_stop_words.get_stop_words()
def find_soft_selectors(ids_and_clean_visible, start_num_tokens='10',
                        max_num_tokens='20', filter_punctuation='0'):
    '''External interface for dossier.models.soft_selectors.
    This at scans through `num_tokens` values between
    `start_num_tokens` and `max_num_tokens` and calls
    `find_soft_selectors_at_n` looking for results
    All of the params can be passed from URL parameters, in which
    case they can be strings and this function will type cast them
    appropriately.
    '''
    # Coerce URL-style string parameters into their real types.
    start_num_tokens = int(start_num_tokens)
    max_num_tokens = int(max_num_tokens)
    filter_punctuation = bool(int(filter_punctuation))
    if not ids_and_clean_visible:
        logger.info('find_soft_selectors called with no ids_and_clean_visible')
        return []
    current_results = [] ## results from current n
    previous_results = [] ## previous results from last n
    overall_results = [] ## overall results to return
    for num_tokens in range(start_num_tokens, max_num_tokens + 1):
        ## update this here
        previous_results = current_results
        results_at_n = find_soft_selectors_at_n(
            ids_and_clean_visible, num_tokens, filter_punctuation)
        if len(results_at_n) == 0:
            break
        # results_at_n is sorted by score descending, so index 0 is the best;
        # keep only the leading run of results tied at the best score.
        best_score = results_at_n[0]['score']
        ## i.e. the initial condition is they all have the same score
        idx_at_second = len(results_at_n)
        for idx, result in enumerate(results_at_n):
            if result['score'] < best_score:
                idx_at_second = idx
                break
        current_results = results_at_n[0:idx_at_second]
        # NOTE(review): debug logging fires only at n == 8 — looks like a
        # leftover from development; confirm whether it should be removed.
        if num_tokens == 8:
            for r in results_at_n:
                logger.info('%s --- score: %d' % (r['phrase'], r['score']))
        if previous_results == []:
            logger.info('Previous results are empty. Continuing.')
            continue
        ## now, the main idea is to figure out if any strings from previous
        ## are substrings of those from current
        ## (with the scores fixed at the max for that subphrase).
        ## when they stop being substrings
        ## then those are completed phrases and should be returned as a result
        for prev_result in previous_results:
            is_subbed_and_same_score = False
            for curr_result in current_results:
                if prev_result['phrase'] in curr_result['phrase'] and \
                   prev_result['score'] == curr_result['score'] :
                    is_subbed_and_same_score = True
                    break
            if not is_subbed_and_same_score: ## then it's a honest result
                # Phrase stopped growing: record it with the n it was found at.
                prev_result['n'] = num_tokens - 1
                overall_results.append(prev_result)
        if len(current_results) == 0:
            ## we got them all
            ## (we still had to collect the previous results)
            ## that's why this break comes after the previous for loop
            break
    ## also add results from current_results at final n
    for result in current_results:
        result['n'] = num_tokens
        overall_results.append(result)
    ## sort by score then by length
    overall_results.sort(key=itemgetter('score', 'n'), reverse=True)
    logger.info('OVERALL RESULTS: %d' % len(overall_results))
    # for idx, result in enumerate(overall_results):
    # logger.info('%d. %s --- score: %f , n = %d, hits=%d' %
    # (idx, result['phrase'], result['score'], result['n'], len(result['hits']))
    # )
    return overall_results
def find_soft_selectors_at_n(ids_and_clean_visible, num_tokens,
                             filter_punctuation):
    """Rank `num_tokens`-grams by the number of documents containing them.

    Returns a list of dicts {'score', 'phrase', 'hits'} sorted by score
    descending, where score is the per-document occurrence count.

    Bug fix: map() results are now materialized as lists — on Python 3
    map() returns a one-shot iterator and `corpus_cids` is indexed by
    position below, which previously raised TypeError.
    """
    corpus_clean_visibles = list(map(itemgetter(1), ids_and_clean_visible))
    corpus_cids = list(map(itemgetter(0), ids_and_clean_visible))
    corpus_strings = make_ngram_corpus(
        corpus_clean_visibles, num_tokens, filter_punctuation)
    ## make dictionary
    dictionary = corpora.Dictionary(corpus_strings)
    ## make word vectors
    corpus = list(map(dictionary.doc2bow, corpus_strings))
    ## tf-idf weighting is intentionally disabled: raw bags-of-ngrams are used
    corpus_tfidf = corpus
    ## count, per phrase id, how many documents contain it, and remember
    ## which content ids those documents were
    corpus_total = defaultdict(int)
    inverted_index = defaultdict(set)
    for doc_idx, doc in enumerate(corpus_tfidf):
        for word_id, score in doc:
            ## this way we only count once per doc
            corpus_total[word_id] += 1
            inverted_index[word_id].add(corpus_cids[doc_idx])
    ## order the phrases by score across the documents
    corpus_ordered = sorted(
        corpus_total.items(), key=itemgetter(1), reverse=True)
    top_phrases = []
    for word_id, score in corpus_ordered:
        top_phrases.append({
            'score': score,
            'phrase': dictionary[word_id],
            'hits': [{'content_id': cid, 'title': None}
                     for cid in inverted_index[word_id]],
        })
    return top_phrases
def make_ngram_corpus(corpus_clean_visibles, num_tokens, filter_punctuation,
                      zoning_rules=False):
    '''takes a list of clean_visible texts, such as from StreamItems or
    FCs, tokenizes all the texts, and constructs n-grams using
    `num_tokens` sized windows.
    ``corpus_clean_visibles`` -- list of unicode strings
    ``num_tokens`` --- the n of the n-grams
    ``filter_punctuation`` --- if True, punctuation is filtered
    '''
    ## TODO: generatlize this zoning code, so that it works on many
    ## sites in the HT domain; consider finishing streamcorpus-zoner
    ## to do this.
    if filter_punctuation:
        ## word tokenizer that removes punctuation
        tokenize = RegexpTokenizer(r'\w+').tokenize
        backpage_string = 'backpage'
        end_string = 'Poster'
    else:
        # Bug fix: `string.split(s)` was removed in Python 3; `s.split()`
        # is the identical whitespace split on both versions.
        tokenize = lambda s: s.split()
        backpage_string = 'backpage.com'
        end_string = 'Poster\'s'
    corpus = list()
    for clean_vis in corpus_clean_visibles:
        ## crudely skip pages that have "error"
        # Bug fix: regex flags must be combined with `|`; the previous
        # `re.I & re.UNICODE` evaluates to 0 and silently dropped both flags.
        if re.search(u'error', clean_vis, re.I | re.UNICODE):
            continue
        ## make tokens
        tokens = tokenize(clean_vis) ## already a unicode string
        if zoning_rules:
            ## filter out non backpage pages
            if backpage_string not in tokens:
                continue
            ## string that signals the beginning of the body
            try:
                idx0 = tokens.index('Reply')
            except ValueError:
                continue
            ## string that signals the end of the body
            try:
                idx1 = tokens.index(end_string)
            except ValueError:
                continue
            tokens = tokens[idx0:idx1]
        ## make ngrams, attach to make strings
        ngrams_strings = list()
        for ngram_tuple in ngrams(tokens, num_tokens):
            ## remove windows dominated by repeated words
            if len(set(ngram_tuple)) < len(ngram_tuple) / 2:
                continue
            ## this adds ngrams for the current doc
            ngrams_strings.append(' '.join(ngram_tuple))
        ## this adds a list of all the ngrams from the current doc
        ## to the corpus list
        corpus.append(ngrams_strings)
    return corpus
def ids_and_clean_visible_from_streamcorpus_chunk_path(corpus_path):
    '''converts a streamcorpus.Chunk file into the structure that is
    passed by the search engine to find_soft_selectors
    '''
    # Pipeline stages built with their own default configs.
    ch = clean_html(clean_html.default_config)
    cv = clean_visible(clean_visible.default_config)
    ids_and_clean_visible = []
    for si in streamcorpus.Chunk(path=corpus_path):
        if not si.body.clean_visible:
            ## attempt to make clean_visible
            if not si.body.raw:
                logger.critical('no raw content, so skipping: %r', si.abs_url)
                continue
            # Keep the URL before `si` is rebound by the pipeline stages.
            abs_url = si.abs_url
            si = ch(si, {})
            if not si:
                logger.critical(
                    'failed to make clean_html, so skipping: %r', abs_url)
                continue
            si = cv(si, {})
            if not si or not si.body.clean_visible:
                logger.critical(
                    'failed to make clean_visible, so skipping: %r', abs_url)
                continue
        # Each record: (stream_id, unicode clean_visible text, empty metadata dict).
        rec = (si.stream_id, si.body.clean_visible.decode('utf8'), {})
        ids_and_clean_visible.append(rec)
    return ids_and_clean_visible
def main():
    """Command-line entry point for debugging and development."""
    parser = argparse.ArgumentParser(
        'command line tool for debugging and development')
    parser.add_argument('corpus', help='path to a streamcorpus.Chunk file')
    parser.add_argument('-n', '--num-tokens', default=6, type=int,
                        help='the n of the ngrams; used as start_num_tokens '
                        'for scanning')
    parser.add_argument('--max-num-tokens', default=40, type=int,
                        help='maximum number of `n` in n-grams for scanning')
    # NOTE(review): --peak-score-delta is parsed but never read below;
    # confirm whether it should feed into the scan.
    parser.add_argument('--peak-score-delta', default=0.01, type=float,
                        help='delta in score values required between first '
                        'and second result to stop scanning')
    parser.add_argument('--scan-window-size', default=False,
                        action='store_true',
                        help='if set, scans from the value of -n until it '
                        'finds a strongly peaked top value')
    parser.add_argument('--filter-punctuation', default=False,
                        action='store_true',
                        help='filter out punctuation; default is to not '
                        'filter punctuation')
    parser.add_argument('--show-ids', default=False, action='store_true',
                        help='show identifiers in diagnostic output')
    args = yakonfig.parse_args(parser, [yakonfig, dblogger])
    ## TODO: if we start needing to load FC chunk files (instead of SI
    ## chunk files), this might need to be told which kind of chunk it
    ## is loading, and we'll need a second function along the lines of
    ## ids_and_clean_visible_from_streamcorpus_chunk_path
    ## mimic the in-process interface:
    ids_and_clean_visible = ids_and_clean_visible_from_streamcorpus_chunk_path(
        args.corpus)
    logger.info('gathered %d texts', len(ids_and_clean_visible))

    def format_result(result):
        # NOTE(review): this unpacks a 3-tuple, but find_soft_selectors
        # returns dicts with 'score'/'phrase'/'hits' keys — confirm which
        # result shape is intended here.
        score, soft_selector_phrase, matching_texts = result
        return '%.6f\t%d texts say:\t%s\t%s' % \
            (score, len(matching_texts), soft_selector_phrase.encode('utf8'),
             args.show_ids and repr(matching_texts) or '')

    if args.scan_window_size:
        best = find_soft_selectors(
            ids_and_clean_visible,
            start_num_tokens=args.num_tokens,
            max_num_tokens=args.max_num_tokens,
            # Bug fix: was `filtered_punctuation=`, which is not a parameter
            # of find_soft_selectors and raised TypeError on this code path.
            filter_punctuation=args.filter_punctuation)
        if not best:
            print('failed to find a best result!')
        else:
            print('found a best result:')
            print('\n'.join(map(format_result, best)))
    else:
        results = find_soft_selectors_at_n(
            ids_and_clean_visible, args.num_tokens, args.filter_punctuation)
        print('\n'.join(map(format_result, results)))

if __name__ == '__main__':
    main()
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/soft_selectors.py",
"copies": "1",
"size": "12655",
"license": "mit",
"hash": 4051342648212833000,
"line_mean": 35.1571428571,
"line_max": 88,
"alpha_frac": 0.6005531411,
"autogenerated": false,
"ratio": 4.035395408163265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135948549263265,
"avg_score": null,
"num_lines": null
} |
# Algorithm information
import re, sys, numpy, copy, math
from numpy.random import *
from xml.dom import minidom
#### Regular expression for implemented priors or posterior placeholder#############################
# Each pattern is applied with .match(), i.e. anchored at the start of the
# prior-name token from the XML input.
re_prior_const=re.compile('constant')
re_prior_uni=re.compile('uniform')
re_prior_normal=re.compile('normal')
re_prior_logn=re.compile('lognormal')
re_prior_posterior=re.compile('posterior')
#### Regular expression for implemented models (ODE/SDE)#############################
re_ODE=re.compile('ODE')
re_SDE=re.compile('SDE')
#### Regular Expression for True/None ##############################################################
re_true=re.compile('True')
re_none=re.compile('None')
#### Function to obtain weighted sample from posterior sample (taken from ABC Sys Bio package)######
def getWeightedSample(weights):
    """Draw an index from `weights` (a sequence of single-element [w] rows)
    with probability proportional to each row's weight."""
    cumulative = []
    total = 0
    for row in weights:
        total += row[0]
        cumulative.append(total)
    # Uniform draw over [0, total); return the first bucket exceeding it.
    threshold = random() * total
    for index, bound in enumerate(cumulative):
        if threshold < bound:
            return index
#### Function to obtain a single value from given node and tag in input xml file####################
def parse_required_single_value( node, tagname, message, cast ):
    """Extract the text of the first <tagname> under `node` and cast it.

    Prints `message` and exits the process if the tag is missing or the
    cast fails. Fix: `print x` statements (Python-2-only syntax) replaced
    with single-argument `print(x)`, which behaves identically on 2 and 3,
    and bare `except:` narrowed to `except Exception`.
    """
    try:
        data = node.getElementsByTagName(tagname)[0].firstChild.data
    except Exception:
        print(message)
        sys.exit()
    ret = 0
    try:
        ret = cast( data )
    except Exception:
        print(message)
        sys.exit()
    return(ret)
#### Function to obtain a vector of value from given node and tag in input xml file################
def parse_required_vector_value(self, node, tagname, message, cast ):
    """Extract the whitespace-separated values of <tagname> under `node`,
    cast each one, and return them as a list.

    NOTE: `self` is unused — kept for interface compatibility with existing
    callers. Prints `message` and exits on a missing tag, a failed cast, or
    an empty vector. Fix: Python-2-only `print x` statements replaced with
    single-argument `print(x)` (identical output on 2 and 3) and bare
    `except:` narrowed to `except Exception`.
    """
    try:
        data = node.getElementsByTagName(tagname)[0].firstChild.data
    except Exception:
        print(message)
        sys.exit()
    tmp = str( data ).split()
    ret = []
    try:
        ret = [ cast(i) for i in tmp ]
    except Exception:
        print(message)
        sys.exit()
    if len(ret) == 0:
        print(message)
        sys.exit()
    return(ret)
def type_parser (type, filename):
    """Map a model-type string to an internal code; only ODE (-> 0) is supported.

    Bug fix: the error branches referenced `self`, which does not exist in
    this free function, so they raised NameError instead of printing the
    intended message; they also used Python-2-only `print` statements. The
    messages now name only the file. SDE is recognized but unsupported.
    """
    if re_ODE.match(type):
        return 0
    elif re_SDE.match(type):
        print("\n\nERROR: Type of the model (SDE) in " + str(filename) + " is not supported!\n\n")
        sys.exit()
    else:
        print("\n\nERROR: Type of the model is not supported in " + str(filename) + "!\n\n")
        sys.exit()
#### Function to identify prior the user given prior and obtain its values#########################
#####
def process_prior(self, tmp ,filename):
    """Decode one prior specification into the internal [code, a, b] triple.

    Arguments:
    self     -- parsing object; only self.name/self.nmodels are read, and
                only to build the error message
    tmp      -- token list, e.g. ['uniform', '0', '10'] or ['constant', '1.5']
    filename -- input file name, used in the error messages

    Returns [code, a, b] where code is 0=constant(value), 1=normal(mean, sd),
    2=uniform(low, high), 3=lognormal(mean, sigma), 4=posterior sample; unused
    slots stay 0.  Prints an error and exits on an unknown keyword or a
    malformed/missing numeric value.
    """
    # keyword -> (numeric code, number of numeric values that must follow)
    if re_prior_const.match( tmp[0] ):
        code, nvalues = 0, 1
    elif re_prior_normal.match( tmp[0] ):
        code, nvalues = 1, 2
    elif re_prior_uni.match( tmp[0] ):
        code, nvalues = 2, 2
    elif re_prior_logn.match( tmp[0] ):
        code, nvalues = 3, 2
    elif re_prior_posterior.match( tmp[0] ):
        code, nvalues = 4, 0
    else:
        print("\n\nERROR: Supplied parameter prior in %s %s  is unsupported!\n\n" % (filename, tmp[0]))
        sys.exit()
    prior_tmp = [0,0,0]
    prior_tmp[0] = code
    try:
        for k in range(nvalues):
            prior_tmp[k+1] = float( tmp[k+1] )
    # ValueError: non-numeric token; IndexError: too few tokens supplied.
    except (ValueError, TypeError, IndexError):
        print("\n\nERROR: Value of the prior for experiment  %s has the wrong format in%s!\n\n" % (self.name[self.nmodels-1], filename))
        sys.exit()
    return prior_tmp
#### Function to parse a string into an integer#####################################################
def parseint(str):
    """Cast a token to int where possible; hand non-numeric tokens back unchanged."""
    try:
        value = int(str)
    except ValueError:
        return str
    return value
#### Function to parse a string into an integer and offset it by -1 to obtain the right index########
def parseint_index(str):
    """Cast a 1-based index token to its 0-based int; non-numeric tokens pass through."""
    try:
        zero_based = int(str) - 1
    except ValueError:
        return str
    return zero_based
#### Function to parse fitting information about species###########################################
#####
def parse_fitting_information( mod_str, node, species_number ):
    """Read which species are measurable from the <mod_str> element of ``node``.

    Arguments:
    mod_str        -- tag name holding the specification (e.g. 'measuredspecies')
    node           -- minidom element to search
    species_number -- total number of species (used for the 'All' shorthand)

    Returns a list with one entry per measured quantity; each entry is a list
    of 0-based species indices interleaved with '+'/'-' tokens describing how
    the species are combined.  A lone 'All' token means every species is
    observed individually.
    """
    fitref = node.getElementsByTagName(mod_str)[0]
    tokens = str( fitref.firstChild.data ).split()
    # "All" on its own: every species is observed on its own.
    if (tokens[0]=="All") and (len(tokens)==1):
        return( [ [k] for k in range(species_number) ] )
    observed = []
    for token in tokens:
        # Strip the textual 'species' prefix, then pad '+'/'-' with spaces so
        # that arithmetic combinations split into separate tokens.
        expr = re.sub('species','', token )
        expr = re.sub(r'\+', ' + ', expr)
        expr = re.sub(r'\-', ' - ', expr)
        observed.append( [ parseint_index(part) for part in expr.split(" ") ] )
    return( observed )
#### Function to parse fitting information about other model parameter###############################
#####
def parse_fitting_information_parameters(mod_str, node, item, parameter_number):
    """Read which parameters/initials/compartments are to be fitted.

    Arguments:
    mod_str          -- tag name holding the specification (e.g. 'paramfit')
    node             -- minidom element to search
    item             -- textual prefix to strip from each token (e.g. 'parameter')
    parameter_number -- total count, used when the specification is 'All'

    Returns a list of 0-based indices: all of them for 'All', none for 'None',
    otherwise the (1-based) numbers embedded in the tokens, shifted down by one.
    """
    fitref = node.getElementsByTagName(mod_str)[0]
    tokens = str( fitref.firstChild.data ).split()
    if (tokens[0]=="All") and (len(tokens)==1):
        return( list(range(0,parameter_number)) )
    if (tokens[0]=="None") and (len(tokens)==1):
        return( [] )
    indices = []
    for token in tokens:
        # e.g. 'parameter3' -> '3' -> index 2
        indices.append( int(re.sub(item,'', token)) - 1 )
    return( indices )
class algorithm_info:
"""
A class to parse the user-provided input file, return all information required to run cuda-sim and hold associated data.
"""
    def __init__(self, filename, combination_list):
        """Parse the XML input file and populate all run attributes.

        Arguments:
        filename         -- path to the XML input file (parsed with minidom)
        combination_list -- combinations of initial-condition/parameter changes
                            defining the individual models/experiments

        Any missing or malformed required element prints an error message and
        terminates the program via sys.exit().
        """
        xmldoc = minidom.parse(filename)
        ### mode is 0 inference, 1 simulate, 2 design
        #self.mode = mode
        ### initialises attributes of the object
        #### Global model/experiment attributes
        self.modelnumber = 0
        self.particles = 0
        self.beta = 0
        self.dt = 0
        self.times = []
        self.ntimes = 0
        self.nspecies_all=0
        self.nparameters_all = 0
        self.sigma= 0
        self.ncompparams = []
        #### SDE specific attributes
        #self.beta =
        #self.cuda_SDE =
        #### Posterior sample attributes
        self.sampleFromPost = False
        self.post_sample_file = ""
        self.post_weight_file = ""
        #### Attribute indicating if initial conditions are defined by priors
        self.initialprior = False
        #### Fit attributes
        self.comp_fit= []
        self.init_fit= []
        self.param_fit = []
        #### Sample attributes
        self.N1sample = 0
        self.N2sample = 0
        self.N3sample = 0
        self.N4sample = 0
        #### Attribute holding combination of initial condition, parameter changes which define models/experiments
        self.combination = combination_list
        #### Model/Experiment specific attributes
        self.nmodels = 0
        self.nparameters = []
        self.nspecies = []
        self.name = []
        self.cuda = []
        self.source = []
        self.type = ""
        self.prior = []
        self.x0prior = []
        self.compprior = []
        self.fitSpecies = []
        ##################################################
        ## Required arguments
        ### get number of models
        self.modelnumber = parse_required_single_value( xmldoc, "experimentnumber", "\n\nERROR: Please provide an integer value for <experimentnumber> in"+ filename+ "!", int )
        #### gets type of model (ODE/SDE)#####################
        self.type=type_parser (parse_required_single_value( xmldoc, "type", "\n\nERROR: Type of the model in"+ filename+ "is not supported!", str ).strip(),filename) ### convert type into integer and check for correct definition test
        ### get number of samples
        self.particles = parse_required_single_value( xmldoc, "particles", "\n\nERROR: Please provide an integer value for <particles> in"+ filename+ "!", int )
        ### get dt
        self.dt = parse_required_single_value( xmldoc, "dt", "\n\nERROR: Please provide an float value for <dt> in"+ filename+ "!", float )
        ### identifies data node in input xml file
        dataref = xmldoc.getElementsByTagName('data')[0]
        ### get timepoints
        self.times = parse_required_vector_value(self, dataref, "times", "\n\nERROR: Please provide a white space separated list of values for <data><times> in"+ filename+ "!" , float )
        self.ntimes = len(self.times)
        #### check if times are in ascending order
        ### get global number of parameters
        self.nparameters_all = parse_required_single_value(dataref, "nparameters_all", "\n\nERROR: Please provide an integer value for <data><nparameters_all> in"+ filename+ "!", int)
        ### get sigma (std. dev. of the measurement noise added in THETAS/addNoise)
        self.sigma = parse_required_single_value(dataref, "sigma", "\n\nERROR: Please provide a float value for <data><sigma> in"+ filename+ "!", float)
        ### get information about sample from posterior
        if parse_required_single_value( dataref, "samplefrompost", "\n\nERROR: Please provide a boolean value for <samplefrompost> in"+ filename+ "!", str ).strip()=="True":
            self.sampleFromPost = True
            self.post_sample_file = parse_required_single_value( dataref, "samplefrompost_file", "\n\nERROR: Please provide a file name for <samplefrompost_file> in"+ filename+ "!", str ).strip()
            self.post_weight_file = parse_required_single_value( dataref, "samplefrompost_weights", "\n\nERROR: Please provide a file name for <samplefrompost_weights> in"+ filename+ "!", str ).strip()
        else:
            self.sampleFromPost = False
        ### are the initial conditions given as priors (True) or constants (False)?
        if parse_required_single_value( dataref, "initialprior", "\n\nERROR: Please provide a boolean value for <initialprior> in"+ filename+ "!", str ).strip()=="True":
            self.initialprior = True
        else:
            self.initialprior = False
        #### get sizes of N1, N2, N3 and N4 samples
        # NOTE(review): nsampleref is never used below — N1..N4 are looked up via
        # dataref, which works only because getElementsByTagName searches all
        # descendants; verify that <nsamples> nests inside <data>.
        nsampleref = xmldoc.getElementsByTagName('nsamples')[0]
        self.N1sample = parse_required_single_value( dataref, "N1", "\n\nERROR: Please provide an integer value for <nsamples><N1> in"+ filename+ "!", int )
        self.N2sample = parse_required_single_value( dataref, "N2", "\n\nERROR: Please provide an integer value for <nsamples><N2> in"+ filename+ "!", int )
        self.N3sample = parse_required_single_value( dataref, "N3", "\n\nERROR: Please provide an integer value for <nsamples><N3> in"+ filename+ "!", int )
        self.N4sample = parse_required_single_value( dataref, "N4", "\n\nERROR: Please provide an integer value for <nsamples><N4> in"+ filename+ "!", int )
        if (self.N1sample+self.N2sample+self.N3sample+self.N4sample)>self.particles:
            print "\n\nERROR: The sum of N1, N2, N3 and N4 is bigger than given particle number in"+ filename+ "!"
            sys.exit()
        ### get model attributes: one child element per experiment
        modelref = xmldoc.getElementsByTagName('experiments')[0]
        for m in modelref.childNodes:
            if m.nodeType == m.ELEMENT_NODE:
                self.nmodels += 1
                self.prior.append([])
                self.x0prior.append([])
                self.compprior.append([])
                #### gets name of experiment##################
                try:
                    self.name.append( str(m.getElementsByTagName('name')[0].firstChild.data).strip() )
                except:
                    print "\n\nERROR: Please provide a string value for <name> for experiment ", self.nmodels, " in ", filename, "!"
                    sys.exit()
                #### gets name of associated SBML file###############
                try:
                    self.source.append( str(m.getElementsByTagName('source')[0].firstChild.data).strip() )
                except:
                    print "\n\nERROR: Please provide an string value for <source> for experiment ", self.nmodels, " in ", filename, "!"
                    sys.exit()
                #### gets name of associated CUDA file#############
                try:
                    self.cuda.append( str(m.getElementsByTagName('cuda')[0].firstChild.data).strip() )
                except:
                    print "\n\nERROR: Please provide an string value for <cuda> for experiment ", self.nmodels, " in ", filename, "!"
                    sys.exit()
                nparameter = 0
                ncompparam = 0
                #### gets priors of compartment if defined ##########
                # Deliberate best-effort: a missing <compartments> element simply
                # means this experiment has no compartment parameters.
                try:
                    compref = m.getElementsByTagName('compartments')[0]
                    for p in compref.childNodes:
                        if p.nodeType == p.ELEMENT_NODE:
                            ncompparam += 1
                            prior_tmp = [0,0,0]
                            tmp = str( p.firstChild.data ).split()
                            self.compprior[self.nmodels-1].append( process_prior(self, tmp ,filename) )
                except:
                    ncompparam = 0
                #### get priors of model parameters##################
                paramref = m.getElementsByTagName('parameters')[0]
                for p in paramref.childNodes:
                    if p.nodeType == p.ELEMENT_NODE:
                        nparameter += 1
                        prior_tmp = [0,0,0]
                        tmp = str( p.firstChild.data ).split()
                        self.prior[self.nmodels-1].append( process_prior(self, tmp, filename) )
                #### get priors/constants for initial conditions###
                ninit = 0
                initref = m.getElementsByTagName('initial')[0]
                for inn in initref.childNodes:
                    if inn.nodeType == inn.ELEMENT_NODE:
                        ninit += 1
                        prior_tmp = [0,0,0]
                        tmp = str( inn.firstChild.data ).split()
                        self.x0prior[self.nmodels-1].append( process_prior(self, tmp, filename) )
                #### get measurable species of experiment###############
                try:
                    self.fitSpecies.append( parse_fitting_information('measuredspecies', m, ninit ) )
                except:
                    print "\n\nERROR: Measurable species are not defined properly with <measuredspecies> ... </measuredspecies> for experiment", self.nmodels, " in ", filename, "!"
                    sys.exit()
                #### Error checking for parameter prior, initial condition prior and measurable species
                if nparameter == 0:
                    print "\n\nERROR: No parameters specified in experiment ", self.nmodels, " in ", filename, "!"
                    sys.exit()
                if ninit == 0:
                    print "\n\nERROR: No initial conditions specified in experiment ", self.nmodels, " in ", filename, "!"
                    sys.exit()
                if len(self.fitSpecies[self.nmodels-1])==0:
                    print "\n\nERROR: No measurable species specified in experiment ", self.nmodels, " in ", filename, "!"
                    sys.exit()
                self.nparameters.append( nparameter )
                self.nspecies.append( ninit )
                self.ncompparams.append( ncompparam )
        if self.nmodels == 0:
            print "\n\nERROR: No experiments specified in "+ filename+ "!"
            sys.exit()
        ### checks if all experiments have the same number of species
        if len(set(self.nspecies))==1:
            self.nspecies_all = list(set(self.nspecies))[0]
        else:
            print "\n\nERROR: Models don't have the same number of species in " +filename+ "!"
            sys.exit()
        ### checks if all experiments have the same number of compartments
        if len(set(self.ncompparams))==1: #and list(set(self.ncompparams))[0]!=0:
            self.ncompparams_all = list(set(self.ncompparams))[0]
        elif len(set(self.ncompparams))!=1:
            print "\n\nERROR: Experimental models don't have the same number of compartments in" +filename+ "!"
            sys.exit()
        ### checks if all experiments have the same number of parameters
        if (len(set(self.nparameters))!=1) or (self.nparameters_all != list(set(self.nparameters))[0]):
            print "\n\nERROR: Experimental models don't have the same number of parameters in" +filename+ "!"
            sys.exit()
        ### get parameter fit
        try:
            self.param_fit =( parse_fitting_information_parameters('paramfit', dataref, 'parameter' ,self.nparameters_all ) )
        except:
            print "\n\nERROR: Parameters to be fitted are not defined properly in <paramfit> ... </paramfit> in" +filename+ "!"
            sys.exit()
        ###
        ### get initial fit
        try:
            self.init_fit =( parse_fitting_information_parameters('initfit', dataref, 'initial' ,self.nspecies_all ) )
        except:
            print "\n\nERROR: Initial conditions to be fitted are not defined properly in <initfit> ... </initfit> in" +filename+ "!"
            sys.exit()
        ###
        ### get compartment fit
        try:
            self.comp_fit=( parse_fitting_information_parameters('compfit', dataref, 'compartment' ,self.ncompparams_all ) )
        except:
            print "\n\nERROR: Compartments to be fitted are not defined properly in <compfit> ... </compfit> in" +filename+ "!"
            sys.exit()
        ###
###
# A method which prints out details about the models/experiments defined in the object
    def print_info(self):
        """Print a human-readable summary of the parsed run configuration.

        Writes the global settings followed by a per-experiment section for
        each of the ``self.nmodels`` experiments.  Output format relies on
        Python 2 comma-print semantics (single space between items).
        """
        print "\nALGORITHM INFO"
        print "experimentnumber:", self.modelnumber
        print "samples:", self.particles
        print "dt:", self.dt
        print "type:", self.type
        print "parameters:", self.nparameters_all
        print "nspecies:", self.nspecies_all
        print "ncompparams:", self.ncompparams_all
        print "sample from posterior:", bool(self.sampleFromPost)
        print "sample file:", self.post_sample_file
        print "weight file:", self.post_weight_file
        print "parameter fit:", self.param_fit
        print "initial condition fit:", self.init_fit
        print "compartment fit:", self.comp_fit
        print "initial prior:", self.initialprior
        print "sigma:", self.sigma
        print "N1:", self.N1sample
        print "N2:", self.N2sample
        print "N3:", self.N3sample
        print "N4:", self.N4sample
        print "times:", self.times
        print "EXPERIMENTS:", self.nmodels
        # One indented section per experiment
        for i in range(self.nmodels):
            print "\t", "npar:", self.nparameters[i]
            print "\t", "nspecies:", self.nspecies[i]
            print "\t", "ncompparams:", self.ncompparams[i]
            print "\t", "name:", self.name[i]
            print "\t", "source:", self.source[i]
            print "\t", "measured species:", self.fitSpecies[i]
            print "\t", "init:", self.x0prior[i]
            print "\t", "prior:", self.prior[i]
            print "\t", "comp_prior:", self.compprior[i]
            print "\n"
# A method to sample from the priors
##(method gets called by sorting_files)
##Arguments:
##inputpath - if a sample for the prior is given then inputpath is where this file is
##usesbml - input indicating whether an SBML file is used or not
def THETAS(self, inputpath="", usesbml = False):
#Compartments are always defined in SBML file and so if local code used but number of compartments > 0 then change usesbml to TRUE in order to sample from compartment prior
if self.ncompparams_all!=0:
usesbml = True
#If statement determines whether to sample from a given sample or use an in-built one
if self.sampleFromPost==False:
#Sets up matrix for parameters
parameters = numpy.zeros([self.particles,self.nparameters_all])
#Loop through number of parameters
for j in range(len(self.prior[0])):
#####Constant prior#####
if(self.prior[0][j][0]==0):
parameters[:,j] = self.prior[0][j][1]
#####Uniform prior#####
elif(self.prior[0][j][0]==2):
parameters[:,j] = uniform(low=self.prior[0][j][1], high=self.prior[0][j][2], size=(self.particles))
#####Normal prior#####
elif(self.prior[0][j][0]==1):
parameters[:,j] = normal(loc=self.prior[0][j][1], scale=self.prior[0][j][2], size=(self.particles))
#####Lognormal prior#####
elif(self.prior[0][j][0]==3):
parameters[:,j] = lognormal(mean=self.prior[0][j][1], sigma=self.prior[0][j][2], size=(self.particles))
####If prior not defined####
else:
print " Prior distribution not defined for parameters"
sys.exit()
#If the initial conditions have a prior distribution over them then sample
if self.initialprior == True:
#Sets up initial conditions array
species = numpy.zeros([self.particles,self.nspecies_all])
#Loop through number of species
for j in range(len(self.x0prior[0])):
#####Constant prior#####
if(self.x0prior[0][j][0]==0):
species[:,j] = self.x0prior[0][j][1]
#####Uniform prior#####
elif(self.x0prior[0][j][0]==2):
species[:,j] = uniform(low=self.x0prior[0][j][1], high=self.x0prior[0][j][2], size=(self.particles))
#####Normal prior#####
elif(self.x0prior[0][j][0]==1):
species[:,j] = normal(loc=self.x0prior[0][j][1], scale=self.x0prior[0][j][2], size=(self.particles))
#####Lognormal prior#####
elif(self.x0prior[0][j][0]==3):
species[:,j] = lognormal(mean=self.x0prior[0][j][1], sigma=self.x0prior[0][j][2], size=(self.particles))
####If prior not defined####
else:
print " Prior distribution not defined on initial conditions"
sys.exit()
#If using constant initial conditions then create species matrix
else:
#Finds the set of initial conditions (no repeated elements)
x0prior_uniq = [self.x0prior[0]]
for ic in self.x0prior[1:]:
if ic not in x0prior_uniq:
x0prior_uniq.append(ic)
#Sets up the species array
species = [numpy.zeros([self.particles,self.nspecies_all]) for x in range(len(x0prior_uniq))]
#Loop over each initial condition
for ic in range(len(x0prior_uniq)):
#Loop through number of species
for j in range(len(x0prior_uniq[ic])):
#####Constant prior#####
if(x0prior_uniq[ic][j][0]==0):
species[ic][:,j] = x0prior_uniq[ic][j][1]
####If prior not defined####
else:
print " Prior distribution not defined on initial conditions"
sys.exit()
#If usesbml ==True then means that compartments are used
if usesbml == True:
#Sets up compartment matrix
compartments = numpy.zeros([self.particles,self.ncompparams_all])
#Loop through number of compartments
for j in range(len(self.compprior[0])):
#####Constant prior#####
if(self.compprior[0][j][0]==0): # j paramater self.index
compartments[:,j] = self.compprior[0][j][1]
#####Uniform prior#####
elif(self.compprior[0][j][0]==2):
compartments[:,j] = uniform(low=self.compprior[0][j][1], high=self.compprior[0][j][2], size=(self.particles))
#####Normal prior#####
elif(self.compprior[0][j][0]==1):
compartments[:,j] = normal(loc=self.compprior[0][j][1], scale=self.compprior[0][j][2], size=(self.particles))
#####Lognormal prior#####
elif(self.compprior[0][j][0]==3):
compartments[:,j] = lognormal(mean=self.compprior[0][j][1], sigma=self.compprior[0][j][2], size=(self.particles))
####If prior not defined#####
else:
print " Prior distribution not defined on compartments"
sys.exit()
#If a sample from the prior is given then go here
#obtain Thetas from posterior sample and associated weights
elif self.sampleFromPost==True:
######Reading in sample from posterior#####
infileName = inputpath+"/"+self.post_sample_file
in_file=open(infileName, "r")
param=[]
counter=0
for in_line in in_file.readlines():
in_line=in_line.rstrip()
param.append([])
param[counter]=in_line.split(" ")
param[counter] = map(float, param[counter])
counter=counter+1
in_file.close
######Reading in weigths associated to sample from posterior#####
infileName = inputpath+"/"+self.post_weight_file
in_file=open(infileName, "r")
weights=[]
counter2=0
for in_line in in_file.readlines():
in_line=in_line.rstrip()
weights.append([])
weights[counter2]=in_line.split(" ")
weights[counter2] = map(float, weights[counter2])
counter2=counter2+1
in_file.close
#If no compartments then fall here
if usesbml == False:
####Obtain Theta from posterior samples through weigths####
#Checks to make sure the number of samples is equal to the number of weights
if(counter==counter2):
#Sets up arrays for parameters and initial conditions
parameters = numpy.zeros( [self.particles,self.nparameters_all] )
species = numpy.zeros([self.particles,self.nspecies_all])
#For loop draws samples one at a time
for i in range(self.particles):
#Function that gives sample
index = getWeightedSample(weights)
parameters[i,:] = param[index][:self.nparameters_all]
species[i,:] = param[index][-self.nspecies_all:]
else:
print "Please provide equal number of particles and weights in model!"
sys.exit()
#If compartments used then fall here
elif usesbml == True:
####Obtain Theta from posterior samples through weigths####
#Checks to make sure the number of samples is equal to the number of weights
if(counter==counter2):
#Sets up arrays for compartments, parameters and initial conditions
compartments = numpy.zeros([self.particles,self.ncompparams_all])
parameters = numpy.zeros( [self.particles,self.nparameters_all] )
species = numpy.zeros([self.particles,self.nspecies_all])
#For loop draws samples one at a time
for i in range(self.particles):
#Function that gives sample
index = getWeightedSample(weights)
compartments[i,:] = param[index][:self.ncompparams_all]
parameters[i,:] = param[index][self.ncompparams_all:self.ncompparams_all+self.nparameters_all] #self.index indefies list which is used to assign parameter value. j corresponds to different parameters defines column
species[i,:] = param[index][-self.nspecies_all:]
else:
print "Please provide equal number of particles and weights in model!"
sys.exit()
#Approach 2 requires a different way to sampling from the prior and is dealt with here
if self.analysisType == 1:
#Extracts the N3 sample from the parameter matrix
paramsN3 = parameters[(self.particles-self.N3sample):,:]
#Sets up the N1xN3 parameter sample
params_final = numpy.concatenate((paramsN3,)*self.N1sample,axis=0)
#For loop changes parameters in N1xN3 sample to be the same as the parameter that is to be estimated in the N1 sample
for j in range(0,self.N1sample):
for i in self.param_fit:
params_final[range((j*self.N3sample),((j+1)*self.N3sample)),i] = parameters[j,i]
#Concatenates the N1 and N2 sample with the N1xN3 sample
parameters = numpy.concatenate((parameters[range(self.particles-self.N3sample),:],params_final),axis=0)
#Has to do the same if the initial conditions were drawn from a prior
if self.initialprior == True:
#Extracts the N3 sample from the initial values matrix
speciesN3 = species[(self.particles-self.N3sample):,:]
#Sets up the N1xN3 initial values sample
species_final = numpy.concatenate((speciesN3,)*self.N1sample,axis=0)
#For loop changes initial conditions in N1xN3 sample to be the same as the initial conditions that is to be estimated in the N1 sample
for j in range(0,self.N1sample):
for i in self.init_fit:
species_final[range((j*self.N3sample),((j+1)*self.N3sample)),i] = species[j,i]
#Concatenates the N1 and N2 sample with the N1xN3 sample
species = numpy.concatenate((species[range(self.particles-self.N3sample),:],species_final),axis=0)
else:
#Just creates a new intial values matrix that is the same but of size (N1+N2+N1xN3)x(number of species) since initial values are constant
for ic in range(len(species)):
species[ic] = numpy.tile(species[ic][0,:],(self.N1sample*self.N3sample+self.N1sample+self.N2sample,1))
#Has to do the same if compartments are used
if usesbml == True:
#Extracts the N3 sample from the initial values matrix
compsN3 = compartments[(self.particles-self.N3sample):,:]
#Sets up the N1xN3 initial values sample
comp_final = numpy.concatenate((compsN3,)*self.N1sample,axis=0)
#For loop changes compartments in N1xN3 sample to be the same as the compartmetns that is to be estimated in the N1 sample
for j in range(0,self.N1sample):
for i in self.comp_fit:
comp_final[range((j*self.N3sample),((j+1)*self.N3sample)),i] = compartments[j,i]
#Concatenates the N1 and N2 sample with the N1xN3 sample
compartments = numpy.concatenate((compartments[range(self.particles-self.N3sample),:],comp_final),axis=0)
#Creates attribute for object with compartment sample
if usesbml == True:
self.compsSample = compartments
#Need to treat each approach differently
if self.analysisType !=2:
#For approach 1 and 2 just simply make attributes
self.parameterSample = parameters
self.speciesSample = species
#For approach 3 need split up samples between those used by reference and those used by the experiments
elif self.analysisType == 2:
#Extract N4 sample
self.N4parameterSample = parameters[self.N1sample+self.N2sample+self.N3sample:self.N1sample+self.N2sample+self.N3sample+self.N4sample,:]
#Set attribute in reference model to N1, N2 and N3 sample
self.parameterSample = parameters[:self.N1sample+self.N2sample+self.N3sample,:]
#If prior used for initial conditions then do the same as parameters before
if self.initialprior == True:
#Extract the N4 sampel
self.N4speciesSample = species[self.N1sample+self.N2sample+self.N3sample:self.N1sample+self.N2sample+self.N3sample+self.N4sample,:]
#Set attribute in reference model to N1, N2 and N3 sample
self.speciesSample = species[:self.N1sample+self.N2sample+self.N3sample,:]
else:
#Otherwise just set the constant initial values to attribute
self.speciesSample = [x[:self.N1sample+self.N2sample+self.N3sample,:] for x in species]
#If compartments used
if usesbml == True:
#Extract N4 sample
self.N4compsSample = compartments[self.N1sample+self.N2sample+self.N3sample:self.N1sample+self.N2sample+self.N3sample+self.N4sample,:]
#Create attribute for reference object
self.compsSample = compartments[:self.N1sample+self.N2sample+self.N3sample,:]
#Reset the total number of particles
self.particles -= self.N4sample
#Set N4 to 0 as it is not used for the reference model
self.N4sample = 0
# A method to obtain the approach
##(method gets called by sorting_files)
##Arguments:
##analysisType - type of approach
    def getAnalysisType(self,analysisType):
        """Record the analysis approach (called by sorting_files before THETAS).

        Arguments:
        analysisType -- integer selector used throughout the class; per the
                        branch comments elsewhere: 1 selects "approach 2"
                        (N1xN3 expansion), 2 selects "approach 3" (separate
                        N4 reference sample), other values follow the
                        approach-1 paths.
        """
        self.analysisType = analysisType
# A method to match each cudacode file to a list of initial conditions
##(method gets called by sorting_files)
##No arguments
def getpairingCudaICs(self):
#Initialises dictionary
self.pairParamsICS = {}
#First detect whether using given prior sample
if self.sampleFromPost == False:
#If using prior over initials then simply create dictionary with key of cuda code file and values with the prior distribution of the initial conditions
if self.initialprior == True:
for Cfile in set(self.cuda):
self.pairParamsICS[Cfile] = [self.x0prior[j] for j in [i for i, x in enumerate(self.cuda) if x == Cfile]][0]
#If using constant initials then keys are cuda code and values is a list of the initial conditions used with the cuda code file
elif self.initialprior == False:
for Cfile in set(self.cuda):
temp = [[l[1] for l in self.x0prior[j]] for j in [i for i, x in enumerate(self.cuda) if x == Cfile]]
temp_uniq = [temp[0]]
for ic in temp[1:]:
if ic not in temp_uniq:
temp_uniq.append(ic)
self.pairParamsICS[Cfile] = temp_uniq
# A method to that sorts the output from CUDA-sim
##(method gets called by run_cudasim)
##Arguments:
##cudaorder - order in which cudacode files are passed in CUDA-sim
##cudaout - output from cudasim
    def sortCUDASimoutput(self,cudaorder,cudaout):
        """Sort the raw CUDA-sim output, drop NaN particles, and assign per-experiment outputs.

        Called by run_cudasim.

        Arguments:
        cudaorder -- order in which cuda code files were passed to CUDA-sim
        cudaout   -- list of 3-D arrays (particles x timepoints x species),
                     one per cuda code file; entries are modified in place
                     (NaN rows deleted)

        Populates self.cudaout (per experiment) and self.cudaout_structure
        (surviving sample counts per cuda file), then calls fitSort() and,
        for ODE models (self.type == 0), addNoise().
        """
        #Sets up attribute for output of CUDA-sim
        self.cudaout=[""]*len(self.cuda)
        #If approach is of type 2 then total particles is N1+N2+N1xN3 otherwise just sum of Ni
        if self.analysisType == 1:
            Nparticles = self.N1sample+self.N2sample+self.N1sample*self.N3sample
        else:
            Nparticles = self.particles
        #cuda_NAs holds the number of particles which don't result in NAs from cuda-sim
        ##Format of cuda_NAs:
        ##For approach 1 and 3: {"Exp_1.cu":[N1,N2,N3,N4],"Exp_2.cu:[N1,N2,N3,N4],...etc"}
        ##For approach 2: {"Exp_1.cu":[N1,N2,[N3_1,...,N3_N1]],"Exp_2.cu:[N1,N2,[N3_1,...,N3_N1]],...etc"}
        cuda_NAs = dict((k, []) for k in cudaorder)
        #For loop iterates over distinct cuda codes
        for i, cudafile in enumerate(cudaorder):
            #Index_NA finds the particle indices for which there are NAs
            index_NA = [p for p, e in enumerate(numpy.isnan(numpy.sum(numpy.sum(cudaout[i][:,:,:],axis=2),axis=1))) if e==True]
            #Detects whether we have priors over the initial conditions or not and sets pairings_ICs for iteration over in the next part
            # NOTE(review): .values()[i] is Python 2 only and assumes the dict
            # iterates in the same order as cudaorder — verify this pairing.
            if self.initialprior == False:
                pairing_ICs = enumerate(self.pairParamsICS.values()[i])
            else:
                pairing_ICs = enumerate(range(1))
            #Iteration over different initial conditions
            for j, IC in pairing_ICs:
                #Splits up the NAs belong to each specific initial condition
                index_NA_IC = [s for s in index_NA if s < (j+1)*Nparticles and s >= j*Nparticles]
                #Need to distinguish between approach 2 and the others as NAs handled differently
                if self.analysisType !=1:
                    #For approach 1 and 3 simply count the NAs and remove
                    N1_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample]
                    N2_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample+self.N2sample and x >= j*Nparticles + self.N1sample]
                    N3_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample+self.N2sample+self.N3sample and x >= j*Nparticles + self.N1sample + self.N2sample]
                    N4_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample+self.N2sample+self.N3sample+self.N4sample and x >= j*Nparticles + self.N1sample + self.N2sample+self.N3sample]
                    #Key is the cuda code file string and the values is a list corresponding to the remaining samples after removing NAs
                    cuda_NAs[cudafile].append([self.N1sample-len(N1_NA),self.N2sample-len(N2_NA),self.N3sample-len(N3_NA),self.N4sample-len(N4_NA)])
                #Approach 2 needs to be considered more carefully
                #If there is an NA for N1 then need to remove all of the corresponding N3
                #If there is an NA in an N3 then the N1 which this corresponds to has a different N3, i.e. after removing NAs
                elif self.analysisType == 1:
                    #start and end are the position for the N1xN3 sample for a specific initial condition
                    start = j*Nparticles + self.N1sample+self.N2sample
                    end = j*Nparticles + self.N1sample+self.N2sample + self.N1sample*self.N3sample
                    #Counts NAs in N1 and N2
                    N1_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample]
                    N2_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample+self.N2sample and x >= j*Nparticles + self.N1sample]
                    new_N2 = self.N2sample-len(N2_NA)
                    #Calculates the number of NAs in N1xN3 sample but seperates out between each N3 belonging to a specific N1
                    additional_N1N3_NAs = [range(int(j*Nparticles + self.N1sample + self.N2sample + x*self.N3sample),int(j*Nparticles + self.N1sample + self.N2sample + (x+1)*self.N3sample)) for x in N1_NA - j*Nparticles*numpy.ones([len(N1_NA)])]
                    y = []
                    for temp in additional_N1N3_NAs:
                        y+=temp
                    additional_N1N3_NAs = y
                    #Collects all detected NAs into one list
                    index_NA_IC = list(set().union(index_NA_IC,additional_N1N3_NAs))
                    #Finds the NAs in the N1xN3 sample
                    N1N3_NA = [x for x in index_NA_IC if x < end and x >= start]
                    #Finds the remaining particles in N1xN3 sample
                    remaining_N1N3 = [item for item in range(start,end) if item not in N1N3_NA]
                    #Splits up the N1xN3 into a list for each N1
                    keep_N1N3 = [[z for z in y if z not in index_NA_IC] for y in [list(range(x,x+self.N3sample)) for x in range(start,end,self.N3sample)]]
                    #Finds the number remaining in the N1xN3 sample
                    new_N1N3 = [len(x) for x in keep_N1N3 if len(x)!=0]
                    #Adds to indicies to remove
                    index_NA_IC = set().union(index_NA_IC, [x+j*Nparticles for x,y in enumerate(new_N1N3) if y == 0])
                    #Calculates new NAs in N1 sample
                    N1_NA = [x for x in index_NA_IC if x < j*Nparticles + self.N1sample]
                    new_N1 = self.N1sample-len(N1_NA)
                    #Sets the number remaining in the sample
                    cuda_NAs[cudafile].append([new_N1,new_N2,new_N1N3])
                #Adds NAs to entire list index_NA
                index_NA = list(set().union(index_NA,index_NA_IC))
            #Removes the NAs in the cuda output
            cudaout[i] = numpy.delete(cudaout[i], index_NA, axis=0)
        #Sets the size of the sample to attribute after NAs removed
        self.cudaout_structure = cuda_NAs
        #Copies the correct cuda output to the correct experiment
        if self.initialprior == False:
            #For loop over the cuda codes in the experiments
            for model, cudafile in enumerate(self.cuda):
                #Picks corresponding sample for each cuda code
                cudaout_temp = cudaout[cudaorder.index(cudafile)]
                #Finds which initial condition this experiment has
                pos = self.pairParamsICS[cudafile].index([x[1] for x in self.x0prior[model]])
                #Find total number of particles associated with cuda file
                # NOTE(review): range(pos-1) sums only the first pos-1 blocks —
                # suspected off-by-one (range(pos) would cover all preceding
                # initial conditions); confirm against the intended layout.
                if self.analysisType!=1:
                    size_cudaout_start = [sum(cuda_NAs[cudafile][x]) for x in range(pos-1)]
                    size_cudaout_start = sum(size_cudaout_start)
                    size_cudaout_end = size_cudaout_start + sum(cuda_NAs[cudafile][pos])
                else:
                    size_cudaout_start = [sum([cuda_NAs[cudafile][x][0]]+[cuda_NAs[cudafile][x][1]]+cuda_NAs[cudafile][x][2]) for x in range(pos-1)]
                    size_cudaout_start = sum(size_cudaout_start)
                    size_cudaout_end = size_cudaout_start + sum([cuda_NAs[cudafile][pos][0]]+[cuda_NAs[cudafile][pos][1]]+cuda_NAs[cudafile][pos][2])
                #Copies associated cudaoutput to corresponding experiement
                self.cudaout[model] = cudaout_temp[size_cudaout_start:size_cudaout_end,:,:]
        #If priors over initial conditions then assign just corresponding cuda code
        else:
            for model, cudafile in enumerate(self.cuda):
                self.cudaout[model] = cudaout[cudaorder.index(cudafile)]
        #Calls fitSort() which accounts for which species are measurable
        print "-----Sorting out measurable species-----"
        self.fitSort()
        #Adds noise to the N1 trajectories if ODEs used
        if self.type == 0:
            print "-----Adding noise to CUDA-Sim outputs-----"
            self.addNoise(cudaorder)
# A method to add noise to the N1 sample
##(method gets called by sortCUDASimoutput)
##Arguments:
##cudaorder - order in which cudacode files are passed in CUDA-sim
def addNoise(self,cudaorder):
    """Add Gaussian observation noise to the N1 trajectories.

    Called by sortCUDASimoutput when ODEs are used. Fills
    self.trajectories with one noisy array per experiment.

    Arguments:
    cudaorder -- order in which cuda code files were passed to CUDA-sim
    """
    # One trajectory slot per experiment.
    self.trajectories = [""] * len(self.cuda)
    for model, cudafile in enumerate(self.cuda):
        if self.initialprior == False:
            # Constant initial conditions: locate which IC set this
            # experiment uses within the per-cudafile bookkeeping.
            ic_values = [x[1] for x in self.x0prior[model]]
            pos = self.pairParamsICS[cudafile].index(ic_values)
        else:
            # Priors over initial conditions: a single entry per cuda file.
            pos = 0
        # N1 sample size recorded for this cuda file / initial condition.
        n1 = self.cudaout_structure[cudafile][pos][0]
        # Sample zero-mean Gaussian noise matching the N1 slice.
        shape = (n1, len(self.times), len(self.fitSpecies[model]))
        perturbation = normal(loc=0.0, scale=self.sigma, size=shape)
        self.trajectories[model] = self.cudaout[model][:n1, :, :] + perturbation
# A method to sort out measurable species
##(method gets called by sortCUDASimoutput)
##No arguments
def fitSort(self):
    """Reduce the raw CUDA-sim output to the measurable species.

    Called by sortCUDASimoutput. For each experiment, each entry of
    self.fitSpecies is either a single species index, or an alternating
    [index, op, index, op, ...] list combining species with "+" / "-"
    (e.g. species1 + species2 - species3). self.cudaout is replaced
    in place with the reduced arrays.
    """
    for exp_idx, raw in enumerate(self.cudaout):
        fits = self.fitSpecies[exp_idx]
        # One output channel per measurable (possibly combined) species.
        reduced = numpy.zeros((raw.shape[0], raw.shape[1], len(fits)))
        for out_idx, spec in enumerate(fits):
            if len(spec) == 1:
                # Directly measurable species: plain copy.
                reduced[:, :, out_idx] = raw[:, :, spec[0]]
            else:
                # Combined species: apply the first operator...
                base = raw[:, :, spec[0]]
                if spec[1] == "+":
                    reduced[:, :, out_idx] = base + raw[:, :, spec[2]]
                elif spec[1] == "-":
                    reduced[:, :, out_idx] = base - raw[:, :, spec[2]]
                # ...then fold in any remaining operator/index pairs.
                for extra, op in enumerate(spec[3::2]):
                    operand = raw[:, :, spec[2 * extra + 4]]
                    if op == "+":
                        reduced[:, :, out_idx] += operand
                    elif op == "-":
                        reduced[:, :, out_idx] -= operand
        # Replace the raw output with the measurable-species version.
        self.cudaout[exp_idx] = reduced
#Scaling method for approach 1 and 2
##(method gets called by sorting_files)
##No arguments
def scaling(self):
    """Compute per-experiment scaling factors (approaches 1 and 2).

    Called by sorting_files. Fills self.scale with one factor per model,
    chosen so that later likelihood computations stay within floating
    point range.
    """
    # Arbitrary precision floor.
    preci = pow(10, -34)
    self.scale = [""] * self.nmodels
    for model in range(self.nmodels):
        traj = self.trajectories[model]
        sim = self.cudaout[model]
        # Largest absolute separation between the noisy trajectories
        # and the raw simulator output.
        span = max(
            math.fabs(numpy.amax(traj) - numpy.amin(sim)),
            math.fabs(numpy.amax(sim) - numpy.amin(traj)),
        )
        # Unnormalised Gaussian density at the maximal distance.
        f_span = 1.0 * math.exp(-(span * span) / (2.0 * self.sigma * self.sigma))
        exponent = 1.0 / (len(self.fitSpecies[model]) * len(self.times))
        if f_span < preci:
            # Density underflows the precision floor: use the largest
            # representable scale spread over all datapoints.
            self.scale[model] = pow(1.79 * pow(10, 300), exponent)
        else:
            self.scale[model] = pow(preci, exponent) * 1.0 / f_span
#Scaling method for approach 3
##(method gets called by sorting_files)
##M_Ref - number of timepoints in reference model
##P_Ref - number of measurable species in reference model
##Note if scaling_ge3 called for reference model then M_ref and P_ref take default values
def scaling_ge3(self,M_Ref = 0,P_Ref = 0):
    """Compute per-experiment scaling factors for approach 3 (log scale).

    Called by sorting_files.

    Arguments:
    M_Ref -- number of timepoints in the reference model
    P_Ref -- number of measurable species in the reference model
    (both default to 0 when called for the reference model itself)
    """
    # Per-model maximum distance between noisy trajectories and raw
    # simulator output, taken timepoint by timepoint.
    peak_dist = []
    for model in range(self.nmodels):
        traj = self.trajectories[model]
        sim = self.cudaout[model]
        per_tp = [
            max(
                math.fabs(numpy.amax(traj[:, tp, :]) - numpy.amin(sim[:, tp, :])),
                math.fabs(numpy.amax(sim[:, tp, :]) - numpy.amin(traj[:, tp, :])),
            )
            for tp in range(len(self.times))
        ]
        peak_dist.append(numpy.amax(numpy.array(per_tp)))
    # Arbitrary precision floor.
    preci = pow(10, -34)
    self.scale = [""] * self.nmodels
    for model in range(self.nmodels):
        # Largest timepoint / species counts between reference and experiment.
        m_max = float(max(M_Ref, len(self.times)))
        p_max = float(max(P_Ref, len(self.fitSpecies[model])))
        denom = 2.0 * m_max * p_max
        # Log-scale Gaussian bound at the maximal distance...
        scale1 = math.log(preci) / denom + \
            (peak_dist[model] * peak_dist[model]) / (2.0 * self.sigma * self.sigma)
        # ...versus the largest representable comparative scale.
        scale2 = math.log(pow(10, 300)) / denom
        self.scale[model] = scale1 if scale1 < scale2 else 0.0
#A method to copy prior sample from reference model for approach 3
##(method gets called by sorting_files)
##refmod - object for the reference model
def copyTHETAS(self,refmod):
    """Copy the prior samples from the reference model (approach 3).

    Called by sorting_files. Reuses the reference model's parameter /
    species / compartment draws so this experiment shares the same
    sample, and zeroes out this model's own N3 sample. Exits the
    process if the two models do not share the same kind of prior.

    Arguments:
    refmod -- object for the reference model whose samples are copied
    """
    #Sets the N3 sample for experiments (N3 comes entirely from refmod)
    self.particles -= self.N3sample
    self.N3sample = 0
    #Sets parameter sample for experiment: first N1+N2 reference draws
    #followed by the reference model's N4 block
    self.parameterSample = numpy.concatenate((refmod.parameterSample[:self.N1sample+self.N2sample,:],refmod.N4parameterSample),axis = 0)
    #Sets sample for initial conditions (both models sample ICs from priors)
    if self.initialprior == True and refmod.initialprior == True:
        self.speciesSample = numpy.concatenate((refmod.speciesSample[:self.N1sample+self.N2sample,:],refmod.N4speciesSample),axis = 0)
    #For when constant initial conditions are used by both models
    elif self.initialprior == False and refmod.initialprior == False:
        #Finds set of constant initial conditions (unique, order preserved)
        x0prior_uniq = [self.x0prior[0]]
        for ic in self.x0prior[1:]:
            if ic not in x0prior_uniq:
                x0prior_uniq.append(ic)
        #Initiates list of arrays for species sample
        species = [numpy.zeros([self.particles,self.nspecies_all]) for x in range(len(x0prior_uniq))] # number of repeats x species in system
        #Copies the initial conditions into species list of arrays
        for ic in range(len(x0prior_uniq)):
            #Loop through number of parameter
            for j in range(len(x0prior_uniq[ic])):
                #####Constant prior##### (type code 0 marks a constant)
                if(x0prior_uniq[ic][j][0]==0): # j parameter index
                    species[ic][:,j] = x0prior_uniq[ic][j][1]
                else:
                    #Any other prior type on ICs is unsupported here
                    print " Prior distribution not defined on initial conditions"
                    sys.exit()
        #Sets attribute for species sample
        self.speciesSample = species
    else:
        #Mixed constant/prior setups cannot share one sample
        print "Not sampling from the same prior between reference and experimental models"
        sys.exit()
    #For when compartments are used
    if refmod.ncompparams_all > 0:
        self.compsSample = numpy.concatenate((refmod.compsSample[:self.N1sample+self.N2sample,:],refmod.N4compsSample),axis = 0)
| {
"repo_name": "MichaelPHStumpf/Peitho",
"path": "peitho/errors_and_parsers/ode_parsers/parse_object_info.py",
"copies": "1",
"size": "44706",
"license": "mit",
"hash": 6448375392215181000,
"line_mean": 36.5365239295,
"line_max": 230,
"alpha_frac": 0.6750548025,
"autogenerated": false,
"ratio": 3.128262542859142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4303317345359142,
"avg_score": null,
"num_lines": null
} |
# algorithm:
# 0. remove from consideration any QC test that fails to produce TPR / FPR >= some tunable threshold
# 1. remove from consideration any bad profile not flagged by any test; put these aside for new qc test design
# 2. accept all individual qc tests with 0% fpr; remove these from consideration, along with all profiles they flag
# 3. form list of ntuple AND combos, add their decisions to consideration
# 4. identify profiles flagged by exactly one combination. Accept that combination, drop all profiles marked by this combination, and drop this combination from further consideration
# 5. drop the remaining combination with the highest false positive rate (note at this step all remaining profiles are flagged by at least two combination, so this will not raise the false negative rate).
# 6. go back to 4; loop until the list of accepted combinations flags all bad profiles not dropped in step 1.
ar = __import__('analyse-results')
import util.main as main
import util.dbutils as dbutils
import itertools, sys, json, getopt
from operator import itemgetter
def ntuples(names, n=2):
    """Form every combination of the given test names of size 2 up to n.

    Returns a list of tuples, in the order produced by
    itertools.combinations for each size.
    """
    return [combo
            for size in range(2, n + 1)
            for combo in itertools.combinations(names, size)]
def amend(combo, df):
    """Return a copy of df with one extra column for the AND of a combo.

    The new column holds the logical AND of the decisions of every test
    in `combo`, and is titled by joining the combo members with '&'.
    Note that rename(index=str, ...) also stringifies the index.
    """
    label = '&'.join(combo)
    verdict = df[combo[0]]
    for member in combo[1:]:
        verdict = verdict & df[member]
    return df.assign(xx=verdict).rename(index=str, columns={'xx': label})
# parse command line options
options, remainder = getopt.getopt(sys.argv[1:], 't:d:n:o:h')

# defaults, overridden by the flags below
targetdb = 'iquod.db'   # -t: sqlite database file
dbtable = 'iquod'       # -d: table to read profiles from
outfile = 'htp.json'    # -o: where the accepted-test list is written
samplesize = None       # -n: number of profiles to consider (required)

for opt, arg in options:
    if opt == '-d':
        dbtable = arg
    if opt == '-t':
        targetdb = arg
    if opt == '-n':
        samplesize = int(arg)
    if opt == '-o':
        outfile = arg
    if opt == '-h':
        print('usage:')
        print('-d <db table name to read from>')
        print('-t <name of db file>')
        print('-n <number of profiles to consider>')
        print('-o <filename to write json results out to>')
        print('-h print this help message and quit')
        # The help text promises to quit; previously execution fell
        # through and the full analysis ran anyway.
        sys.exit(0)

if samplesize is None:
    print('please provide a sample size to consider with the -n flag')
    print('-h to print usage')
    # Without a sample size the extraction below would fail with
    # n_to_extract=None; stop here instead of crashing later.
    sys.exit(1)
# Read QC test specifications if required.
groupdefinition = ar.read_qc_groups()

# Read data from database into a pandas data frame.
# NOTE(review): samplesize may be None if -n was not given -- confirm
# dbutils handles that, otherwise this is where it fails.
df = dbutils.db_to_df(table=dbtable,
                      targetdb=targetdb,
                      filter_on_wire_break_test = False,
                      filter_on_tests = groupdefinition,
                      n_to_extract = samplesize)
# Everything past the first two columns is treated as a QC test result
# (the frame also carries 'uid' and 'Truth', used below).
testNames = df.columns[2:].values.tolist()

# declare some downstream constructs
accepted = []    # QC tests / combos accepted into the final set
unflagged = []   # uids of bad profiles flagged by no test (set aside in step 1)
fprs = []        # per test: [name, false positive rate, true positive rate]
# All profiles whose truth flag marks them as bad.
bad = df.loc[df['Truth']]
bad.reset_index(inplace=True, drop=True)

# mark chosen profiles as part of the training set
# NOTE(review): as_matrix() is removed in modern pandas (.to_numpy());
# the UPDATE below is built by string concatenation -- values come from
# the local database itself, but parameterized queries would be safer.
all_uids = main.dbinteract('SELECT uid from ' + dbtable + ';', targetdb=targetdb)
for uid in all_uids:
    # dbinteract returns rows as tuples; unwrap the single uid column.
    uid = uid[0]
    is_training = int(uid in df['uid'].astype(int).as_matrix())
    query = "UPDATE " + dbtable + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
    main.dbinteract(query, targetdb=targetdb)
# algo. step 0:
# demand individual QC tests have TPR/FPR > some threshold
perf_thresh = 2
drop_tests = []
for test in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[test].tolist(), df['Truth'].tolist())
    if fpr > 0 and tpr / fpr < perf_thresh:
        print('dropping', test, '; tpr/fpr = ', tpr/fpr)
        # DataFrame.drop returns a new frame; the result was previously
        # discarded, silently leaving the rejected columns in place.
        # Assign it back so the frames stay in sync with testNames.
        df = df.drop([test], axis=1)
        bad = bad.drop([test], axis=1)
        drop_tests.append(test)
# Keep only the tests that survived the threshold.
testNames = [x for x in testNames if x not in drop_tests]
# algo. step 1:
# note profiles that weren't flagged by any test
# NOTE(review): .ix is removed in modern pandas; .loc/.iloc needed there.
for i in range(len(bad)):
    if not any(bad.ix[i][testNames]):
        unflagged.append(bad.ix[i]['uid'])
# drop these from consideration; they are reported at the end so new
# QC tests can be designed for them
bad = bad[~bad['uid'].isin(unflagged)]

# algo. step 2:
# assess fprs for individual tests
# NOTE(review): as_matrix() is removed in modern pandas (.to_numpy()).
for x in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[x].as_matrix(), df['Truth'].as_matrix())
    fprs.append([x, fpr, tpr])
# accept tests that flag bad profiles with no false positives
print('number of bad profiles to consider:', len(bad))
print('these tests accepted for having no false poitives and more than zero true positives:')
for test in fprs:
    # test is [name, false positive rate, true positive rate]
    if test[1] == 0 and test[2] > 0:
        accepted.append(test[0])
        print(test[0])
        # drop every profile this test flags, then the test itself
        bad = bad[bad[test[0]]==False]
        bad = bad.drop([test[0]], axis=1)
        testNames.remove(test[0])
# keep only the not-yet-accepted tests in the rate list
fprs = [elt for elt in fprs if elt[0] not in accepted]
print('number of bad profiles remaining:', len(bad))

# algo. step 3
# add a column to df for each combo, summarizing its decision for each profile
combos = ntuples(testNames)
combonames = ['&'.join(x) for x in combos]
for combo in combos:
    bad = amend(combo, bad)
    df = amend(combo, df)
# assess tpr, fpr for each test and combo:
for x in combonames:
    tpr, fpr, fnr, tnr = main.calcRates(df[x].as_matrix(), df['Truth'].as_matrix())
    fprs.append([x, fpr, tpr])
# sort ascending by false positive rate, so the worst combo sits last
fprs.sort(key=lambda tup: tup[1])
# algo. step 4
# loop until every remaining bad profile is flagged by an accepted combo
while len(bad) > 0:
    nosingleflags = True
    for i in range(len(bad)):
        # decisions of every remaining test / combo for this bad profile
        x = bad.ix[i][testNames+combonames]
        if sum(x) == 1:
            # exactly one flagger: it must be accepted to catch this profile
            winner = x[x].keys()[0]
            accepted.append(winner) # accept the combo as the only one flagging this bad profile
            ff = [x for x in fprs if x[0] == winner][0][1]
            tf = [x for x in fprs if x[0] == winner][0][2]
            print('accepted', winner, 'tpr=', tf, '; fpr=', ff)
            bad = bad[bad[winner]==False] # drop all bad profiles flagged by this combo
            bad = bad.drop([winner], axis=1) # remove the combo from consideration
            # NOTE(review): these filters use identity (`is not`) rather
            # than equality; this only removes the winner if the exact
            # same string object is stored in the lists -- `!=` would be
            # the robust choice. Confirm pandas preserves object identity
            # in x.keys() here.
            testNames = [elt for elt in testNames if elt is not winner]
            combonames = [elt for elt in combonames if elt is not winner]
            fprs = [elt for elt in fprs if elt[0] is not winner]
            nosingleflags=False
            break
    # algo. step 5
    # no profile is singly flagged: discard the remaining combo with the
    # highest false positive rate (fprs is sorted ascending). Every
    # remaining profile is flagged at least twice, so none is lost.
    if nosingleflags:
        maxfpr = fprs[-1][0]
        bad = bad.drop([maxfpr], axis=1)
        testNames = [x for x in testNames if x is not maxfpr]
        combonames = [x for x in combonames if x is not maxfpr]
        del fprs[-1]

print('profiles not caught by any test:')
print(unflagged)

# persist the accepted test/combo names as JSON
f = open(outfile, 'w')
r = {'tests': accepted}
json.dump(r, f)
f.close()
| {
"repo_name": "BillMills/AutoQC",
"path": "catchall.py",
"copies": "2",
"size": "6659",
"license": "mit",
"hash": -4400967086091814400,
"line_mean": 36.2011173184,
"line_max": 204,
"alpha_frac": 0.6424388046,
"autogenerated": false,
"ratio": 3.3580433686333837,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9979520529183277,
"avg_score": 0.00419232881002126,
"num_lines": 179
} |
# Algorithm
# =========

# Propagation algorithm used for the simulation.
algorithm = "fourier"

# Time stepping
# =============

# Perform a simulation in the time interval [0, T].
T = 3.0

# Duration of a single time step.
dt = 0.02

# Semi-classical parameter
# ========================

# The epsilon parameter in the semiclassical scaling.
eps = 0.2

# Potential
# =========

# The potential used in the simulation.
potential = "delta_gap"

# Energy gap, used in the definition of this potential
# (defined relative to eps).
delta = 0.1*eps

# Initial values
# ==============

# The hagedorn parameters of the initial wavepackets, one 5-tuple per
# packet. NOTE(review): the meaning/order of the five entries is fixed
# by the Hagedorn wavepacket implementation -- confirm there.
parameters = [ (1.0j, 1.0-2.0j, 0.0, 1.0, -2.0),
               (1.0j, 1.0-2.0j, 0.0, 1.0, -2.0) ]

# A list with the lists of (index,value) tuples that set the coefficients
# of the basis functions for the initial wavepackets.
coefficients = [ [(0,1.0)], [(0,0.0)] ]

# Number of basis functions used for Hagedorn packets.
basis_size = 2

# Specific for Fourier
# ====================

# Number of grid nodes (a power of two for the FFT-based propagator).
ngn = 2**12

# Scaling factor for the computational domain.
# The interval in the position space is [-f*pi, f*pi].
f = 2.0

# I/O configuration
# =================

# Write data to disk only each n-th timestep.
write_nth = 2
| {
"repo_name": "WaveBlocks/WaveBlocks",
"path": "doc/manual/examples/parameters_01.py",
"copies": "1",
"size": "1193",
"license": "bsd-3-clause",
"hash": 3764348063394018000,
"line_mean": 17.9365079365,
"line_max": 73,
"alpha_frac": 0.6135792121,
"autogenerated": false,
"ratio": 3.0747422680412373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4188321480141237,
"avg_score": null,
"num_lines": null
} |
import csv
import path
__author__ = "Dan Rugeles"
__copyright__ = "Copyright 2013, Accelerometrics"
__credits__ = ["Dan Rugeles"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Dan Rugeles"
__email__ = "danrugeles@gmail.com"
__status__ = "Production"
#1.0.1
"main(): Added Main to easily preprocess several fields"
#--preprocess------------------------------------------------------
"""Preprocesses Training and Testing Data
TRAIN: Boolean represents if we are preprocessing training or testing data
windowduration: float represents time in seconds
"""
#-------------------------------------------------------------------
def preprocess(TRAIN,windowduration):
    """Aggregate raw accelerometer CSV rows into fixed-duration windows.

    Reads train.csv or test.csv (depending on TRAIN) and writes one CSV
    file per user id containing, for each window: the window start time
    and the mean x, y, z and RSS (root sum of squares) over the window.

    TRAIN -- True to preprocess training data, False for test data
    windowduration -- window length in seconds (input timestamps are ms)
    """
    if TRAIN:
        inputfile=path.root+"train.csv"
        # Id of the first user expected in the training file
        lastId=7
        # File to store new sequence
        userfile= open(path.user+str(lastId)+".csv","w")
    else:
        inputfile=path.root+"test.csv"
        # Id of the first user expected in the test file
        lastId=100006
        # Test output goes into the sequence directory instead
        path.user=path.sequence
        # File to store new sequence
        userfile= open(path.user+str(lastId)+".csv","w")
    # Accumulators for the samples of the current window
    x_s=[];y_s=[];z_s=[];rss_s=[]
    # Initialize time quantization
    lastnewtime=0
    average = lambda x: sum(x)/len(x)
    with open(inputfile,"rb") as csvfile:
        alldata=csv.reader(csvfile,delimiter=",")
        for idx,row in enumerate(alldata):
            #** Definitions
            firstsample_ = (idx==0)
            f_row=[float(x) for x in row]
            # Row layout: timestamp(ms), x, y, z, ..., user id (last column)
            time=f_row[0];x=f_row[1];y=f_row[2];z=f_row[3];Id=int(f_row[-1])
            # Root sum of squares of the acceleration vector
            rss=(x**2+y**2+z**2)**0.5
            newuser_= (Id!=lastId)
            # Reset the time origin at the start of the file or of a new user
            if firstsample_ or newuser_: initialtime=time;
            # Window index of this sample (duration given in seconds)
            newtime=int((time-initialtime)/(windowduration*1000))
            newwindow_=(newtime!=lastnewtime or newuser_)
            aggregatedtime=lastnewtime*windowduration
            #** Filter wrong data (all-zero accelerometer readings)
            if x==0 and y==0 and z == 0: continue
            #** New window? Write aggregated result and start aggregating
            # NOTE(review): average() divides by len(); if a window can
            # open while the accumulators are still empty this raises
            # ZeroDivisionError -- confirm the input rules that out.
            if newwindow_:
                lastnewtime=newtime
                userfile.write(str(aggregatedtime)+
                ","+str(average(x_s))+
                ","+str(average(y_s))+
                ","+str(average(z_s))+
                ","+str(average(rss_s))+"\n")
                x_s[:]=[];y_s[:]=[];z_s[:]=[];rss_s[:]=[]
            #** New User? Create File
            if newuser_:
                lastnewtime=0
                lastId=Id
                userfile.close()
                userfile= open(path.user+str(Id)+".csv","w")
                currenttime=0;
            #** Aggregate data in windows of time.
            x_s.append(x);y_s.append(y);z_s.append(z);rss_s.append(rss)
            #** Uncomment to see individual data
            #userfile.write("_"+str(newtime*windowduration)+","+",".join(row[:-1])+","+str(rss)+"\n")
    # NOTE(review): the final, partially-filled window is never flushed.
    userfile.close()
"""----------------------------*
* *
* |\ /| /\ | |\ | *
* | \/ | /__\ | | \ | *
* | | / \ | | \| *
* *
*----------------------------"""
if __name__=="__main__":
    # Entry point: preprocess the test split using half-second windows.
    preprocess(TRAIN=False, windowduration=0.5)
| {
"repo_name": "danrugeles/Accelerometrics",
"path": "preprocess.py",
"copies": "1",
"size": "2909",
"license": "mit",
"hash": -457842479892118400,
"line_mean": 26.4433962264,
"line_max": 92,
"alpha_frac": 0.5623925748,
"autogenerated": false,
"ratio": 2.9354187689202824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3997811343720283,
"avg_score": null,
"num_lines": null
} |
""" Algorithm predicting a random rating.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .algo_base import AlgoBase
class NormalPredictor(AlgoBase):
    """Algorithm predicting a random rating drawn from a normal
    distribution fitted to the training set.

    Each prediction :math:`\\hat{r}_{ui}` is sampled from
    :math:`\\mathcal{N}(\\hat{\\mu}, \\hat{\\sigma}^2)`, where the mean
    and standard deviation are the Maximum Likelihood estimates computed
    from the training ratings:

    .. math::
        \\hat{\\mu} &= \\frac{1}{|R_{train}|} \\sum_{r_{ui} \\in R_{train}}
        r_{ui}\\\\\\\\\
        \\hat{\\sigma} &= \\sqrt{\\sum_{r_{ui} \\in R_{train}}
        \\frac{(r_{ui} - \\hat{\\mu})^2}{|R_{train}|}}
    """

    def __init__(self):

        AlgoBase.__init__(self)

    def train(self, trainset):

        AlgoBase.train(self, trainset)

        # MLE of the standard deviation: RMS deviation of the ratings
        # around the (already computed) global mean.
        mu = self.trainset.global_mean
        squared_dev = sum((rating - mu)**2
                          for (_, _, rating) in self.trainset.all_ratings())
        self.sigma = np.sqrt(squared_dev / self.trainset.n_ratings)

    def estimate(self, *_):
        # Every prediction is an independent random draw; user and item
        # are ignored entirely.
        return np.random.normal(self.trainset.global_mean, self.sigma)
| {
"repo_name": "charmoniumQ/Surprise",
"path": "surprise/prediction_algorithms/random_pred.py",
"copies": "1",
"size": "1302",
"license": "bsd-3-clause",
"hash": 1470354788537799700,
"line_mean": 29.2790697674,
"line_max": 79,
"alpha_frac": 0.5852534562,
"autogenerated": false,
"ratio": 3.4263157894736844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45115692456736844,
"avg_score": null,
"num_lines": null
} |
"""Algorithms and strategies to play 2048 and collect experience."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import math
import itertools
import numpy as np
from qlearning4k.games.twenty48 import Twenty48
def random_strategy(_, actions):
    """Strategy that ignores the state and samples an action uniformly."""
    chosen = np.random.choice(actions)
    return chosen
def static_preference_strategy(_, actions):
    """Deterministic strategy: always pick the lowest-numbered action.

    With the usual action encoding this prefers left over up over right
    over down.
    """
    preferred = min(actions)
    return preferred
def highest_reward_strategy(state, actions):
    """Strategy that always chooses the action of highest immediate reward.

    If there are any ties, the strategy prefers left over up over right
    over down (i.e. the smaller action id), relying on a stable sort.

    Args:
        state: Game state; must support copy() and move(action) -> reward.
        actions: Iterable of available action ids.

    Returns:
        The chosen action id.
    """
    # Descending order so that, after a stable argsort, the last of the
    # best-scoring entries corresponds to the smallest action id.
    sorted_actions = np.sort(actions)[::-1]
    # Materialize the rewards: under Python 3 `map` returns an iterator,
    # which np.argsort cannot handle (it was previously passed directly).
    rewards = [state.copy().move(action) for action in sorted_actions]
    action_index = np.argsort(rewards, kind="mergesort")[-1]
    return sorted_actions[action_index]
def play(game, strategy, num_games=100, verbose=False, allow_unavailable_action=False):
    """Play `num_games` games from `game`'s position using `strategy`.

    Args:
        game: Initial game state; must support copy(), is_over(),
            get_possible_actions() and move(action).
        strategy: A function that takes as argument a state and a list of
            available actions and returns an action from the list.
        num_games: Number of independent games to play.
        verbose: If true, prints the final grid of every game.
        allow_unavailable_action: Unused here; kept for interface
            compatibility.

    Returns:
        A (num_games, 4) numpy array with one row per game:
        [final score, number of moves, wall-clock seconds, maximum tile].
        (The previous docstring claimed a (score, experiences) pair,
        which this function never returned.)
    """
    scores = []
    # `range` instead of `xrange`: identical iteration under Python 2,
    # and keeps the module importable under Python 3.
    for _ in range(num_games):
        start_time = time.time()
        state = game.copy()
        game_over = game.is_over()
        while not game_over:
            action = strategy(state, state.get_possible_actions())
            state.move(action)
            game_over = state.is_over()
        if verbose:
            print(state.grid)
        # Grid cells store exponents, hence 2**max for the largest tile.
        scores.append([state._score, state._num_moves, time.time() - start_time, 2**np.max(state.grid)])
    return np.array(scores)
def snake(state):
    """Heuristic value of a board along a "snake line pattern".

    (http://tinyurl.com/l9bstk6) Walks the columns alternately
    bottom-up / top-down, weighting earlier cells along the path more
    heavily, and penalises boards whose highest tile is not in the
    lower-left corner.
    """
    path = []
    for col in range(4):
        column = state.grid[:, col]
        path.extend(reversed(column) if col % 2 == 0 else column)
    best = max(path)
    weighted = sum(value / 10 ** position for position, value in enumerate(path))
    corner = state.grid[3, 0]
    # Quadratic penalty, zero when the maximum sits in the corner.
    penalty = math.pow((corner != best) * abs(corner - best), 2)
    return weighted - penalty
def smooth(state):
    """Smoothness score: negated sum of absolute differences between
    horizontally and vertically adjacent tiles (0 is perfectly smooth)."""
    grid = state.grid
    penalty = 0
    for row in range(4):
        for col in range(4):
            if row < 3:
                penalty += np.abs(grid[row, col] - grid[row + 1, col])
            if col < 3:
                penalty += np.abs(grid[row, col] - grid[row, col + 1])
    return -penalty
def num_zeros(state):
    """Count the empty (zero) cells on the board."""
    return (state.grid == 0).sum()
def np_on_edge(state):
    """Count how often a row/column maximum sits on the board's edge.

    For every row the position of its (first) maximum is checked against
    the left and right edges; for every column against the top and
    bottom edges.

    Bug fix: the bottom-edge check previously repeated ``c == 0``
    instead of testing ``c == 3``, so a column maximum on the bottom row
    was never counted and a top-row maximum was counted twice.
    """
    count = 0
    for i in range(4):
        row_max = np.argmax(state.grid[i])
        col_max = np.argmax(state.grid[:, i])
        count += row_max == 0
        count += row_max == 3
        count += col_max == 0
        count += col_max == 3
    return count
def expecti(heuristic, d=5):
    """Build an expectimax agent for 2048.

    Args:
        heuristic: Function mapping a state to a numeric fitness value.
        d: Search depth.

    Returns:
        An agent function (state, actions) -> action.

    Algorithm details:
        - on the AI's turn, each candidate move is applied to a copy of
          the state and the maximum child value is propagated up;
        - on the spawn turn, every empty cell is filled (on a copy) with
          a 2 (probability .9) or a 4 (probability .1) and the node's
          value is its heuristic plus the probability-weighted average
          of the children.

    Bug fixes versus the previous version:
        - the max-node branch returned from inside its loop, so only the
          first available action was ever explored;
        - the chance-node branch mutated the shared state in place
          (``state.grid[i, j] = 2``), so spawned tiles accumulated
          across loop iterations.
    """
    def alpha_beta_search(state, depth, move=False):
        # Evaluate one node of the expectimax tree.
        if depth == 0 or state.is_over():
            return heuristic(state)
        alpha = heuristic(state)
        if move:
            # Max node: best value over all available moves.
            for action in state.get_possible_actions():
                child = state.copy()
                child.move(action)
                alpha = max(alpha, alpha_beta_search(child, depth - 1))
            return alpha
        # Chance node: weighted average over every possible tile spawn.
        zeros = [(i, j) for i, j in itertools.product(range(4), range(4))
                 if state.grid[i][j] == 0]
        for i, j in zeros:
            spawn2 = state.copy()
            spawn2.grid[i, j] = 2
            spawn4 = state.copy()
            spawn4.grid[i, j] = 4
            alpha += (.9 * alpha_beta_search(spawn2, depth - 1, move=True) / len(zeros)
                      + .1 * alpha_beta_search(spawn4, depth - 1, move=True) / len(zeros))
        return alpha

    def agent(state, actions):
        # Try each available action one ply deep, then search below it.
        best_action = 0
        best_alpha = -np.inf
        for action in actions:
            candidate = state.copy()
            candidate.move(action)
            alpha = alpha_beta_search(candidate, d)
            if alpha > best_alpha:
                best_action = action
                best_alpha = alpha
        return best_action

    return agent
| {
"repo_name": "bhillmann/2048-rl",
"path": "qlearning4k/agents/agents_twenty48.py",
"copies": "1",
"size": "5024",
"license": "mit",
"hash": 1192259145059650000,
"line_mean": 31,
"line_max": 141,
"alpha_frac": 0.5905652866,
"autogenerated": false,
"ratio": 3.7746055597295265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4865170846329526,
"avg_score": null,
"num_lines": null
} |
""" Algorithms for clustering : Meanshift, Affinity propagation and spectral
clustering.
"""
# Author: Alexandre Gramfort alexandre.gramfort@inria.fr
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
| {
"repo_name": "rseubert/scikit-learn",
"path": "sklearn/cluster/affinity_propagation_.py",
"copies": "21",
"size": "10605",
"license": "bsd-3-clause",
"hash": 5238808414725888000,
"line_mean": 31.9347826087,
"line_max": 79,
"alpha_frac": 0.597359736,
"autogenerated": false,
"ratio": 4.021615472127418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" Algorithms for computing spanning trees of entity graphs. """
__author__ = 'smartschat'
def precision_system_output(entity, partitioned_entity):
    """ Compute a spanning tree from antecedent information.

    All edges in the spanning tree correspond to anaphor-antecedent pairs. In
    order to access this antecedent information, the attribute "antecedent" of
    the mentions in the entity must be set.

    Args:
        entity (EntityGraph): The EntityGraph for the entity for which the
            spanning tree should be computed.
        partitioned_entity (EntityGraph): A partition of the entity -- not
            used for this algorithm.

    Returns:
        list(Mention, Mention): A list of mention pairs, which constitute the
            edges of the spanning tree. For a pair (m, n), n appears later in
            the text than m.
    """
    # Keep only pairs the system itself proposed: the mention's recorded
    # antecedent must also be a valid candidate edge in the entity graph.
    pairs = [
        (mention, mention.attributes["antecedent"])
        for mention in entity.edges
        if "antecedent" in mention.attributes
        and mention.attributes["antecedent"] in entity.edges[mention]
    ]
    return sorted(pairs)
def recall_closest(entity, partitioned_entity):
    """ Compute a spanning tree by always taking the closest mention in the same
    entity.

    Args:
        entity (EntityGraph): The EntityGraph for the entity for which the
            spanning tree should be computed.
        partitioned_entity (EntityGraph): A partition of the entity. When a
            mention has candidate antecedents in the partition, the closest
            of those is preferred over candidates from the full entity.

    Returns:
        list(Mention, Mention): A list of mention pairs, which constitute the
            edges of the spanning tree. For a pair (m, n), n appears later in
            the text than m.
    """
    edges = []
    for mention in entity.edges:
        # The first mention in the entity has no candidates and thus
        # contributes no edge.
        if entity.edges[mention]:
            # Candidates sort by text position, so the maximum is the
            # closest preceding mention (max replaces the former
            # sorted(..., reverse=True)[0], avoiding a full sort).
            if mention in partitioned_entity.edges:
                antecedent = max(partitioned_entity.edges[mention])
            else:
                antecedent = max(entity.edges[mention])
            edges.append((mention, antecedent))
    return sorted(edges)
def recall_accessibility(entity, partitioned_entity):
    """ Compute a spanning tree by choosing edges according to the accessibility
    of the antecedent.

    First, if a mention has an out-degree of at least one in the partitioned
    entity, take the edge with the closest mention distance as an edge for
    the spanning tree. Otherwise, proceed as follows.

    If a mention m is a proper name or a common noun, choose an antecedent as
    follows:

    - if a proper name antecedent exists, take the closest and output this
      pair as an edge
    - else if a common noun antecedent exists, take the closest and output
      this pair as an edge
    - else take the closest preceding mention and output this pair as an
      edge

    For all other mentions, take the closest preceding mention and output
    this pair as an edge.

    Args:
        entity (EntityGraph): The EntityGraph for the entity for which the
            spanning tree should be computed.
        partitioned_entity (EntityGraph): A partition of the entity. When a
            mention has candidate antecedents in the partition, the closest
            of those is taken directly.

    Returns:
        list(Mention, Mention): A list of mention pairs, which constitute the
            edges of the spanning tree. For a pair (m, n), n appears later in
            the text than m.
    """
    edges = []
    for mention in entity.edges:
        if entity.edges[mention]:
            # Mention is not the first in a subentity? Take the closest
            # candidate (candidates sort by text position, so max is the
            # closest preceding mention).
            if mention in partitioned_entity.edges:
                antecedent = max(partitioned_entity.edges[mention])
            else:
                antecedent = __get_antecedent_by_type(mention,
                                                      entity.edges[mention])
            edges.append((mention, antecedent))
    return sorted(edges)
def __get_antecedent_by_type(mention, candidates):
    """Pick an antecedent for ``mention``, preferring proper names, then
    common nouns, then simply the closest candidate.

    Pronouns and demonstratives always take the closest candidate.
    """
    # Closest candidates first (candidates sort by text position).
    candidates_reversed = sorted(candidates, reverse=True)
    # mention is (demonstrative) pronoun? take closest!
    if (mention.attributes["type"] == "PRO" or
            mention.attributes["type"] == "DEM"):
        return candidates_reversed[0]
    # Otherwise choose by type, backing off to the closest candidate.
    # Store each lookup once instead of calling __get_by_pos twice per hit.
    name_antecedent = __get_by_pos(candidates_reversed, "NAM")
    if name_antecedent:
        return name_antecedent
    noun_antecedent = __get_by_pos(candidates_reversed, "NOM")
    if noun_antecedent:
        return noun_antecedent
    return candidates_reversed[0]
def __get_by_pos(candidates, pos):
    """Return the first candidate whose "type" attribute equals ``pos``,
    or None when no candidate matches."""
    return next(
        (mention for mention in candidates
         if mention.attributes["type"] == pos),
        None)
| {
"repo_name": "Yegor-Budnikov/cort",
"path": "cort/analysis/spanning_tree_algorithms.py",
"copies": "2",
"size": "4937",
"license": "mit",
"hash": 3987678294780118500,
"line_mean": 36.4015151515,
"line_max": 80,
"alpha_frac": 0.6370265343,
"autogenerated": false,
"ratio": 4.323117338003502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5960143872303503,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for computing symbolic roots of polynomials. """
from __future__ import print_function, division
import math
from sympy.core import S, I, pi
from sympy.core.compatibility import ordered, range, reduce
from sympy.core.exprtools import factor_terms
from sympy.core.function import _mexpand
from sympy.core.logic import fuzzy_not
from sympy.core.mul import expand_2arg, Mul
from sympy.core.numbers import Rational, igcd, comp
from sympy.core.power import Pow
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core.sympify import sympify
from sympy.functions import exp, sqrt, im, cos, acos, Piecewise
from sympy.functions.elementary.miscellaneous import root
from sympy.ntheory import divisors, isprime, nextprime
from sympy.polys.polyerrors import (PolynomialError, GeneratorsNeeded,
DomainError)
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.rationaltools import together
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.simplify import simplify, powsimp
from sympy.utilities import public
def roots_linear(f):
    """Returns a list of roots of a linear polynomial."""
    # For a*x + b the single root is -b/a.
    r = -f.nth(0)/f.nth(1)
    dom = f.get_domain()
    if not dom.is_Numerical:
        # Symbolic coefficients: present the root in a normalized form.
        r = factor(r) if dom.is_Composite else simplify(r)
    return [r]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    a, b, c = f.all_coeffs()
    dom = f.get_domain()
    def _sqrt(d):
        # remove squares from square root since both will be represented
        # in the results; a similar thing is happening in roots() but
        # must be duplicated here because not all quadratics are binomials
        co = []
        other = []
        for di in Mul.make_args(d):
            if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0:
                co.append(Pow(di.base, di.exp//2))
            else:
                other.append(di)
        if co:
            d = Mul(*other)
            co = Mul(*co)
            return co*sqrt(d)
        return sqrt(d)
    def _simplify(expr):
        # Normalize symbolic results: factor over composite domains,
        # otherwise general simplification.
        if dom.is_Composite:
            return factor(expr)
        else:
            return simplify(expr)
    # Special case x*(a*x + b): roots are 0 and -b/a.
    if c is S.Zero:
        r0, r1 = S.Zero, -b/a
        if not dom.is_Numerical:
            r1 = _simplify(r1)
        elif r1.is_negative:
            # keep the negative root first for the documented ZZ ordering
            r0, r1 = r1, r0
    # Special case a*x**2 + c: symmetric pair -R, R with R = sqrt(-c/a).
    elif b is S.Zero:
        r = -c/a
        if not dom.is_Numerical:
            r = _simplify(r)
        R = _sqrt(r)
        r0 = -R
        r1 = R
    # General case via the quadratic formula (-b +/- sqrt(b**2-4ac))/(2a).
    else:
        d = b**2 - 4*a*c
        A = 2*a
        B = -b/A
        if not dom.is_Numerical:
            d = _simplify(d)
            B = _simplify(B)
        D = factor_terms(_sqrt(d)/A)
        r0 = B - D
        r1 = B + D
        if a.is_negative:
            r0, r1 = r1, r0
        elif not dom.is_Numerical:
            r0, r1 = [expand_2arg(i) for i in (r0, r1)]
    return [r0, r1]
def roots_cubic(f, trig=False):
    """Returns a list of roots of a cubic polynomial.
    References
    ==========
    [1] https://en.wikipedia.org/wiki/Cubic_function, General formula for roots,
    (accessed November 17, 2014).
    """
    if trig:
        # Trigonometric method: usable when the discriminant D is provably
        # positive (three distinct real roots), avoiding complex radicals.
        a, b, c, d = f.all_coeffs()
        p = (3*a*c - b**2)/3/a**2
        q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
        D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
        if (D > 0) == True:
            rv = []
            for k in range(3):
                rv.append(2*sqrt(-p/3)*cos(acos(q/p*sqrt(-3/p)*Rational(3, 2))/3 - k*pi*Rational(2, 3)))
            # shift back from the depressed cubic to the original variable
            return [i - b/3/a for i in rv]
    _, a, b, c = f.monic().all_coeffs()
    # Zero constant term: factor out x and solve the remaining quadratic.
    if c is S.Zero:
        x1, x2 = roots([1, a, b], multiple=True)
        return [x1, S.Zero, x2]
    # Reduce to the depressed cubic t**3 + p*t + q via x = t - a/3.
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27
    pon3 = p/3
    aon3 = a/3
    u1 = None
    if p is S.Zero:
        if q is S.Zero:
            # triple root
            return [-aon3]*3
        if q.is_real:
            if q.is_positive:
                u1 = -root(q, 3)
            elif q.is_negative:
                u1 = root(-q, 3)
    elif q is S.Zero:
        # t*(t**2 + p): solve the quadratic factor
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    elif q.is_real and q.is_negative:
        u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)
    coeff = I*sqrt(3)/2
    if u1 is None:
        # General Cardano formula in terms of D0, D1 and C; u1..u3 are the
        # three cube roots of unity.
        u1 = S.One
        u2 = Rational(-1, 2) + coeff
        u3 = Rational(-1, 2) - coeff
        a, b, c, d = S(1), a, b, c
        D0 = b**2 - 3*a*c
        D1 = 2*b**3 - 9*a*b*c + 27*a**2*d
        C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)
        return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]]
    u2 = u1*(Rational(-1, 2) + coeff)
    u3 = u1*(Rational(-1, 2) - coeff)
    if p is S.Zero:
        return [u1 - aon3, u2 - aon3, u3 - aon3]
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]
    return soln
def _roots_quartic_euler(p, q, r, a):
    """
    Descartes-Euler solution of the quartic equation
    Parameters
    ==========
    p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
    a: shift of the roots
    Notes
    =====
    This is a helper function for ``roots_quartic``.
    Look for solutions of the form ::
      ``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
      ``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
      ``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
      ``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``
    To satisfy the quartic equation one must have
    ``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
    so that ``R`` must satisfy the Descartes-Euler resolvent equation
    ``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``
    If the resolvent does not have a rational solution, return None;
    in that case it is likely that the Ferrari method gives a simpler
    solution.
    Examples
    ========
    >>> from sympy import S
    >>> from sympy.polys.polyroots import _roots_quartic_euler
    >>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125
    >>> _roots_quartic_euler(p, q, r, S(0))[0]
    -sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
    """
    # solve the resolvent equation
    x = Dummy('x')
    eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
    xsols = list(roots(Poly(eq, x), cubics=False).keys())
    # only rational, nonzero resolvent roots yield the Euler form
    xsols = [sol for sol in xsols if sol.is_rational and sol.is_nonzero]
    if not xsols:
        return None
    # the largest resolvent root gives the simplest radicals
    R = max(xsols)
    c1 = sqrt(R)
    B = -q*c1/(4*R)
    A = -R - p/2
    c2 = sqrt(A + B)
    c3 = sqrt(A - B)
    return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.
    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.
    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.
    Although no general solution that is always applicable for all
    coefficients is known to this reviewer, certain conditions are tested
    to determine the simplest 4 expressions that can be returned:
      1) `f = c + a*(a**2/8 - b/2) == 0`
      2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
      3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
        a) `p == 0`
        b) `p != 0`
    Examples
    ========
        >>> from sympy import Poly, symbols, I
        >>> from sympy.polys.polyroots import roots_quartic
        >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))
        >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
        >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
        ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']
    References
    ==========
    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. https://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation
    7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
    """
    _, a, b, c, d = f.monic().all_coeffs()
    if not d:
        # zero constant term: x divides f, solve the remaining cubic
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # quasisymmetric case [6]: split into two quadratics
        x, m = f.gen, c/a
        g = Poly(x**2 + a*x + b - 2*m, x)
        z1, z2 = roots_quadratic(g)
        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)
        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)
        return r1 + r2
    else:
        # depressed quartic y**4 + e*y**2 + f*y + g with x = y - a/4
        a2 = a**2
        e = b - 3*a2/8
        f = _mexpand(c + a*(a2/8 - b/2))
        g = _mexpand(d - a*(a*(3*a2/256 - b/16) + c/4))
        aon4 = a/4
        if f is S.Zero:
            # biquadratic: substitute z = y**2
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple=True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y divides the depressed quartic; solve the remaining cubic
            y = [S.Zero] + roots([1, 0, e, f], multiple=True)
            return [tmp - aon4 for tmp in y]
        else:
            # Descartes-Euler method, see [7]
            sols = _roots_quartic_euler(e, f, g, aon4)
            if sols:
                return sols
            # Ferrari method, see [1, 2]
            a2 = a**2
            e = b - 3*a2/8
            f = c + a*(a2/8 - b/2)
            g = d - a*(a*(3*a2/256 - b/16) + c/4)
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)
            def _ans(y):
                # assemble the four roots from one root y of the resolvent
                w = sqrt(e + 2*y)
                arg1 = 3*e + 2*y
                arg2 = 2*f/w
                ans = []
                for s in [-1, 1]:
                    root = sqrt(-(arg1 + s*arg2))
                    for t in [-1, 1]:
                        ans.append((s*w - t*root)/2 - aon4)
                return ans
            # p == 0 case
            y1 = e*Rational(-5, 6) - q**TH
            if p.is_zero:
                return _ans(y1)
            # if p != 0 then u below is not 0
            root = sqrt(q**2/4 + p**3/27)
            r = -q/2 + root  # or -q/2 - root
            u = r**TH  # primary root of solve(x**3 - r, x)
            y2 = e*Rational(-5, 6) + u - p/u/3
            if fuzzy_not(p.is_zero):
                return _ans(y2)
            # sort it out once they know the values of the coefficients
            return [Piecewise((a1, Eq(p, 0)), (a2, True))
                    for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    n = f.degree()
    a, b = f.nth(n), f.nth(0)
    # roots are alpha*zeta**k for the n-th roots of unity zeta
    base = -cancel(b/a)
    alpha = root(base, n)
    if alpha.is_number:
        alpha = alpha.expand(complex=True)
    # define some parameters that will allow us to order the roots.
    # If the domain is ZZ this is guaranteed to return roots sorted
    # with reals before non-real roots and non-real sorted according
    # to real part and imaginary part, e.g. -1, 1, -1 + I, 2 - I
    neg = base.is_negative
    even = n % 2 == 0
    if neg:
        # ``big`` is only meaningful (and defined) for a negative base
        if even == True and (base + 1).is_positive:
            big = True
        else:
            big = False
    # get the indices in the right order so the computed
    # roots will be sorted when the domain is ZZ
    ks = []
    imax = n//2
    if even:
        ks.append(imax)
        imax -= 1
    if not neg:
        ks.append(0)
    for i in range(imax, 0, -1):
        if neg:
            ks.extend([i, -i])
        else:
            ks.extend([-i, i])
    if neg:
        ks.append(0)
        if big:
            # BUG FIX: the reversed pair was computed but never written
            # back into ks, making this loop a no-op; store it so the
            # index order (and hence root order) actually changes.
            for i in range(0, len(ks), 2):
                pair = ks[i: i + 2]
                ks[i: i + 2] = list(reversed(pair))
    # compute the roots
    roots, d = [], 2*I*pi/n
    for k in ks:
        zeta = exp(k*d).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))
    return roots
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.
    Examples
    ========
    >>> from sympy.polys.polyroots import _inv_totient_estimate
    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)
    """
    # Primes p with p - 1 dividing m are the only ones that can divide a
    # preimage of m under Euler's totient.
    primes = [q + 1 for q in divisors(m) if isprime(q + 1)]
    a = b = 1
    for p in primes:
        a *= p
        b *= p - 1
    L = m
    U = int(math.ceil(m*(float(a)/b)))
    # Refine the upper bound using the smallest primorial exceeding U.
    P = p = 2
    primes = []
    while P <= U:
        p = nextprime(p)
        primes.append(p)
        P *= p
    P //= p
    b = 1
    for p in primes[:-1]:
        b *= p - 1
    U = int(math.ceil(m*(float(P)/b)))
    return L, U
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials. """
    # Identify which cyclotomic polynomial f is by searching the index
    # range implied by the inverse-totient estimate of its degree.
    L, U = _inv_totient_estimate(f.degree())
    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)
        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")
    roots = []
    if not factor:
        # get the indices in the right order so the computed
        # roots will be sorted
        h = n//2
        ks = [i for i in range(1, n + 1) if igcd(i, n) == 1]
        ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1))
        d = 2*I*pi/n
        for k in reversed(ks):
            roots.append(exp(k*d).expand(complex=True))
    else:
        # express each root via the factorization over Q(zeta_n)
        g = Poly(f, extension=root(-1, n))
        for h, _ in ordered(g.factor_list()[1]):
            roots.append(-h.TC())
    return roots
def roots_quintic(f):
    """
    Calculate exact roots of a solvable quintic
    """
    result = []
    coeff_5, coeff_4, p, q, r, s = f.all_coeffs()
    # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
    if coeff_4:
        return result
    if coeff_5 != 1:
        # normalize to a monic polynomial; only rational coefficients
        # are handled by this solver
        l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
        if not all(coeff.is_Rational for coeff in l):
            return result
        f = Poly(f/coeff_5)
    quintic = PolyQuintic(f)
    # Eqn standardized. Algo for solving starts here
    if not f.is_irreducible:
        return result
    f20 = quintic.f20
    # Check if f20 has linear factors over domain Z
    if f20.is_irreducible:
        return result
    # Now, we know that f is solvable
    for _factor in f20.factor_list()[1]:
        if _factor[0].is_linear:
            theta = _factor[0].root(0)
            break
    d = discriminant(f)
    delta = sqrt(d)
    # zeta = a fifth root of unity
    zeta1, zeta2, zeta3, zeta4 = quintic.zeta
    T = quintic.T(theta, d)
    tol = S(1e-10)
    alpha = T[1] + T[2]*delta
    alpha_bar = T[1] - T[2]*delta
    beta = T[3] + T[4]*delta
    beta_bar = T[3] - T[4]*delta
    disc = alpha**2 - 4*beta
    disc_bar = alpha_bar**2 - 4*beta_bar
    l0 = quintic.l0(theta)
    l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
    l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))
    l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
    l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))
    order = quintic.order(theta, d)
    test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
    # Comparing floats
    if not comp(test, 0, tol):
        l2, l3 = l3, l2
    # Now we have correct order of l's
    R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
    R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
    R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
    R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4
    Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    sol = Symbol('sol')
    # Simplifying improves performance a lot for exact expressions
    R1 = _quintic_simplify(R1)
    R2 = _quintic_simplify(R2)
    R3 = _quintic_simplify(R3)
    R4 = _quintic_simplify(R4)
    # Solve imported here. Causing problems if imported as 'solve'
    # and hence the changed name
    from sympy.solvers.solvers import solve as _solve
    a, b = symbols('a b', cls=Dummy)
    _sol = _solve( sol**5 - a - I*b, sol)
    for i in range(5):
        _sol[i] = factor(_sol[i])
    R1 = R1.as_real_imag()
    R2 = R2.as_real_imag()
    R3 = R3.as_real_imag()
    R4 = R4.as_real_imag()
    # Each Res[k] holds the five fifth-roots of R_k.
    for i, currentroot in enumerate(_sol):
        Res[1][i] = _quintic_simplify(currentroot.subs({ a: R1[0], b: R1[1] }))
        Res[2][i] = _quintic_simplify(currentroot.subs({ a: R2[0], b: R2[1] }))
        Res[3][i] = _quintic_simplify(currentroot.subs({ a: R3[0], b: R3[1] }))
        Res[4][i] = _quintic_simplify(currentroot.subs({ a: R4[0], b: R4[1] }))
    for i in range(1, 5):
        for j in range(5):
            Res_n[i][j] = Res[i][j].n()
            Res[i][j] = _quintic_simplify(Res[i][j])
    r1 = Res[1][0]
    r1_n = Res_n[1][0]
    # pick r4 so that r1*r4 is (numerically) real
    for i in range(5):
        if comp(im(r1_n*Res_n[4][i]), 0, tol):
            r4 = Res[4][i]
            break
    # Now we have various Res values. Each will be a list of five
    # values. We have to pick one r value from those five for each Res
    u, v = quintic.uv(theta, d)
    testplus = (u + v*delta*sqrt(5)).n()
    testminus = (u - v*delta*sqrt(5)).n()
    # Evaluated numbers suffixed with _n
    # We will use evaluated numbers for calculation. Much faster.
    r4_n = r4.n()
    r2 = r3 = None
    for i in range(5):
        r2temp_n = Res_n[2][i]
        for j in range(5):
            # Again storing away the exact number and using
            # evaluated numbers in computations
            r3temp_n = Res_n[3][j]
            if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and
                comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):
                r2 = Res[2][i]
                r3 = Res[3][j]
                break
        if r2:
            break
    # Now, we have r's so we can get roots
    x1 = (r1 + r2 + r3 + r4)/5
    x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
    x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
    x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
    x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
    result = [x1, x2, x3, x4, x5]
    # Now check if solutions are distinct
    saw = set()
    for r in result:
        r = r.n(2)
        if r in saw:
            # Roots were identical. Abort, return []
            # and fall back to usual solve
            return []
        saw.add(r)
    return result
def _quintic_simplify(expr):
    """Canonicalize quintic subexpressions: powsimp, then cancel, then
    combine over a common denominator."""
    return together(cancel(powsimp(expr)))
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.
    Returns the integer ``div`` such that substituting ``x = div*y``
    ``p(x) = m*q(y)`` where the coefficients of ``q`` are smaller
    than those of ``p``.
    For example ``x**5 + 512*x + 1024 = 0``
    with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``
    Returns the integer ``div`` or ``None`` if there is no possible scaling.
    Examples
    ========
    >>> from sympy.polys import Poly
    >>> from sympy.abc import x
    >>> from sympy.polys.polyroots import _integer_basis
    >>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ')
    >>> _integer_basis(p)
    4
    """
    monoms, coeffs = list(zip(*poly.terms()))
    monoms, = list(zip(*monoms))
    coeffs = list(map(abs, coeffs))
    # Only worth scaling when the low-order coefficients dominate;
    # flip the lists so both run from low degree to high.
    if coeffs[0] < coeffs[-1]:
        coeffs = list(reversed(coeffs))
        n = monoms[0]
        monoms = [n - i for i in reversed(monoms)]
    else:
        return None
    # the leading term is untouched by the substitution
    monoms = monoms[:-1]
    coeffs = coeffs[:-1]
    # try divisors of the coefficient gcd from largest to smallest
    divs = reversed(divisors(gcd_list(coeffs))[1:])
    try:
        div = next(divs)
    except StopIteration:
        return None
    while True:
        # for-else: the loop body breaks to retry with a smaller divisor;
        # falling through means div**monom divides every coefficient.
        for monom, coeff in zip(monoms, coeffs):
            if coeff % div**monom != 0:
                try:
                    div = next(divs)
                except StopIteration:
                    return None
                else:
                    break
        else:
            return div
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``. """
    # ``coeff`` accumulates the overall scaling so that the roots of the
    # returned poly times ``coeff`` are the roots of the input poly.
    coeff = S.One
    poly_func = poly.func
    try:
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        return coeff, poly
    poly = poly.primitive()[1]
    poly = poly.retract()
    # TODO: This is fragile. Figure out how to make this independent of construct_domain().
    if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):
        poly = poly.inject()
        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])
        base, strips = strips[0], strips[1:]
        # Try to eliminate each extra generator whose exponents are a
        # constant multiple of the main generator's exponents.
        for gen, strip in zip(list(gens), strips):
            reverse = False
            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True
            ratio = None
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a
                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                # consistent ratio found: substitute gen -> 1 and fold the
                # generator into the overall coefficient
                if reverse:
                    ratio = -ratio
                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)
        if gens:
            poly = poly.eject(*gens)
    if poly.is_univariate and poly.get_domain().is_ZZ:
        # rescale integer polynomials to shrink their coefficients
        basis = _integer_basis(poly)
        if basis is not None:
            n = poly.degree()
            def func(k, coeff):
                return coeff//basis**(n - k[0])
            poly = poly.termwise(func)
            coeff *= basis
    if not isinstance(poly, poly_func):
        poly = poly_func(poly)
    return coeff, poly
@public
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.
    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.
    Only roots expressible via radicals will be returned.  To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively. If cubic
    roots are real but are expressed in terms of complex numbers
    (casus irreducibilis [1]) the ``trig`` flag can be set to True to
    have the solutions returned in terms of cosine and inverse cosine
    functions.
    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).
    By default a dictionary is returned giving a compact result in
    case of multiple roots. However to get a list containing all
    those roots set the ``multiple`` flag to True; the list will
    have identical roots appearing next to each other in the result.
    (For a given Poly, the all_roots method will give the roots in
    sorted numerical order.)
    Examples
    ========
    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y
    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}
    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}
    >>> p = Poly(x**2-y, x, y)
    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}
    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}
    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method
    """
    from sympy.polys.polytools import to_rational_coeffs
    flags = dict(flags)
    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    trig = flags.pop('trig', False)
    quartics = flags.pop('quartics', True)
    quintics = flags.pop('quintics', False)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)
    if isinstance(f, list):
        # a plain coefficient list: build a Poly in a fresh dummy variable
        if gens:
            raise ValueError('redundant generators given')
        x = Dummy('x')
        poly, i = {}, len(f) - 1
        for coeff in f:
            poly[i], i = sympify(coeff), i - 1
        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
            # BUG FIX: ``f.length`` is a bound method, so the former
            # ``f.length == 2`` comparison was always False and this
            # branch was dead; call it, matching the other call sites.
            if f.length() == 2 and f.degree() != 1:
                # check for foo**n factors in the constant
                n = f.degree()
                npow_bases = []
                others = []
                expr = f.as_expr()
                con = expr.as_independent(*gens)[0]
                for p in Mul.make_args(con):
                    if p.is_Pow and not p.exp % n:
                        npow_bases.append(p.base**(p.exp/n))
                    else:
                        others.append(p)
                if npow_bases:
                    b = Mul(*npow_bases)
                    B = Dummy()
                    d = roots(Poly(expr - con + B**n*Mul(*others), *gens,
                        **flags), *gens, **flags)
                    rv = {}
                    for k, v in d.items():
                        rv[k.subs(B, b)] = v
                    return rv
        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}
    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')
    def _update_dict(result, currentroot, k):
        # accumulate multiplicities of repeated roots
        if currentroot in result:
            result[currentroot] += k
        else:
            result[currentroot] = k
    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []
        for currentroot in _try_heuristics(factors[0]):
            roots.append(currentroot)
        for currentfactor in factors[1:]:
            previous, roots = list(roots), []
            for currentroot in previous:
                g = currentfactor - Poly(currentroot, f.gen)
                for currentroot in _try_heuristics(g):
                    roots.append(currentroot)
        return roots
    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            return [S.Zero]*f.degree()
        if f.length() == 2:
            if f.degree() == 1:
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)
        result = []
        # peel off an obvious root at +/-1, if present
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break
        n = f.degree()
        if n == 1:
            result += list(map(cancel, roots_linear(f)))
        elif n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f, trig=trig)
        elif n == 4 and quartics:
            result += roots_quartic(f)
        elif n == 5 and quintics:
            result += roots_quintic(f)
        return result
    # strip a common power of the generator; its zeros are added back last
    (k,), f = f.terms_gcd()
    if not k:
        zeros = {}
    else:
        zeros = {S.Zero: k}
    coeff, f = preprocess_roots(f)
    if auto and f.get_domain().is_Ring:
        f = f.to_field()
    rescale_x = None
    translate_x = None
    result = {}
    if not f.is_ground:
        dom = f.get_domain()
        if not dom.is_Exact and dom.is_Numerical:
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.length() == 2:
            roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial
            for r in roots_fun(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()
            if len(factors) == 1 and f.degree() == 2:
                for r in roots_quadratic(f):
                    _update_dict(result, r, 1)
            else:
                if len(factors) == 1 and factors[0][1] == 1:
                    if f.get_domain().is_EX:
                        # try rationalizing the coefficients first
                        res = to_rational_coeffs(f)
                        if res:
                            if res[0] is None:
                                translate_x, f = res[2:]
                            else:
                                rescale_x, f = res[1], res[-1]
                            result = roots(f)
                            if not result:
                                for currentroot in _try_decompose(f):
                                    _update_dict(result, currentroot, 1)
                        else:
                            for r in _try_heuristics(f):
                                _update_dict(result, r, 1)
                    else:
                        for currentroot in _try_decompose(f):
                            _update_dict(result, currentroot, 1)
                else:
                    for currentfactor, k in factors:
                        for r in _try_heuristics(Poly(currentfactor, f.gen, field=True)):
                            _update_dict(result, r, k)
    if coeff is not S.One:
        # undo the scaling introduced by preprocess_roots
        _result, result, = result, {}
        for currentroot, k in _result.items():
            result[coeff*currentroot] = k
    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: all(a.is_real for a in r.as_numer_denom()),
            'I': lambda r: r.is_imaginary,
        }
        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)
        for zero in dict(result).keys():
            if not query(zero):
                del result[zero]
    if predicate is not None:
        for zero in dict(result).keys():
            if not predicate(zero):
                del result[zero]
    if rescale_x:
        result1 = {}
        for k, v in result.items():
            result1[k*rescale_x] = v
        result = result1
    if translate_x:
        result1 = {}
        for k, v in result.items():
            result1[k + translate_x] = v
        result = result1
    # adding zero roots after non-trivial roots have been translated
    result.update(zeros)
    if not multiple:
        return result
    else:
        zeros = []
        for zero in ordered(result):
            zeros.extend([zero]*result[zero])
        return zeros
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]

    """
    args = dict(args)
    filter = args.pop('filter', None)

    F = Poly(f, *gens, **args)

    # not convertible to a polynomial: return the input unfactored
    if not F.is_Poly:
        return [f]

    if F.is_multivariate:
        raise ValueError('multivariate polynomials are not supported')

    x = F.gens[0]

    zeros = roots(F, filter=filter)

    if not zeros:
        factors = [F]
    else:
        # one linear factor (x - r) per root, repeated by multiplicity n
        factors, N = [], 0

        for r, n in ordered(zeros.items()):
            factors, N = factors + [Poly(x - r, x)]*n, N + n

        # roots() may not express every root via radicals; append the
        # remaining (rootless) cofactor so the product still equals F
        if N < F.degree():
            G = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(G))

    # mirror the input type: expressions in, expressions out
    if not isinstance(f, Poly):
        factors = [ f.as_expr() for f in factors ]

    return factors
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/polys/polyroots.py",
"copies": "1",
"size": "33045",
"license": "bsd-3-clause",
"hash": -6635360428513781000,
"line_mean": 28.5044642857,
"line_max": 104,
"alpha_frac": 0.5168709336,
"autogenerated": false,
"ratio": 3.2566275746526068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42734985082526067,
"avg_score": null,
"num_lines": null
} |
"""Algorithms for computing symbolic roots of polynomials. """
from __future__ import print_function, division
import math
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core import S, I, pi
from sympy.core.compatibility import ordered
from sympy.core.mul import expand_2arg, Mul
from sympy.core.power import Pow
from sympy.core.relational import Eq
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd, comp
from sympy.core.exprtools import factor_terms
from sympy.core.logic import fuzzy_not
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt, im, cos, acos, Piecewise
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import (PolynomialError, GeneratorsNeeded,
DomainError)
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.rationaltools import together
from sympy.simplify import simplify, powsimp
from sympy.utilities import public
from sympy.core.compatibility import reduce, range
def roots_linear(f):
    """Return the single root of the linear polynomial ``f`` as a list."""
    root_val = -f.nth(0)/f.nth(1)
    domain = f.get_domain()

    if not domain.is_Numerical:
        # Symbolic coefficients: tidy up the quotient before returning.
        root_val = factor(root_val) if domain.is_Composite else simplify(root_val)

    return [root_val]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    a, b, c = f.all_coeffs()
    dom = f.get_domain()

    def _sqrt(d):
        # remove squares from square root since both will be represented
        # in the results; a similar thing is happening in roots() but
        # must be duplicated here because not all quadratics are binomials
        co = []
        other = []
        for di in Mul.make_args(d):
            if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0:
                co.append(Pow(di.base, di.exp//2))
            else:
                other.append(di)
        if co:
            d = Mul(*other)
            co = Mul(*co)
            return co*sqrt(d)
        return sqrt(d)

    def _simplify(expr):
        # composite domains factor more cleanly than generic simplify
        if dom.is_Composite:
            return factor(expr)
        else:
            return simplify(expr)

    if c is S.Zero:
        # f = x*(a*x + b): zero is one root, -b/a the other
        r0, r1 = S.Zero, -b/a

        if not dom.is_Numerical:
            r1 = _simplify(r1)
        elif r1.is_negative:
            # keep the negative root first (sorted order for ZZ)
            r0, r1 = r1, r0
    elif b is S.Zero:
        # f = a*x**2 + c: symmetric pair +/- sqrt(-c/a)
        r = -c/a

        if not dom.is_Numerical:
            r = _simplify(r)

        R = _sqrt(r)
        r0 = -R
        r1 = R
    else:
        # general case via the quadratic formula
        d = b**2 - 4*a*c
        A = 2*a
        B = -b/A

        if not dom.is_Numerical:
            d = _simplify(d)
            B = _simplify(B)

        D = factor_terms(_sqrt(d)/A)
        r0 = B - D
        r1 = B + D

        if a.is_negative:
            # D has the sign of a folded in; swap to restore ordering
            r0, r1 = r1, r0
        elif not dom.is_Numerical:
            r0, r1 = [expand_2arg(i) for i in (r0, r1)]

    return [r0, r1]
def roots_cubic(f, trig=False):
    """Returns a list of roots of a cubic polynomial.

    If ``trig`` is True and the cubic has three distinct real roots
    (positive discriminant), the roots are returned in trigonometric
    form (cos/acos) instead of via radicals.

    References
    ==========
    [1] https://en.wikipedia.org/wiki/Cubic_function, General formula for roots,
    (accessed November 17, 2014).
    """
    if trig:
        # trigonometric method for the casus irreducibilis
        a, b, c, d = f.all_coeffs()
        p = (3*a*c - b**2)/3/a**2
        q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
        D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
        # only applicable when the discriminant is provably positive
        if (D > 0) == True:
            rv = []
            for k in range(3):
                rv.append(2*sqrt(-p/3)*cos(acos(3*q/2/p*sqrt(-3/p))/3 - k*2*pi/3))
            return [i - b/3/a for i in rv]

    _, a, b, c = f.monic().all_coeffs()

    if c is S.Zero:
        # x is a factor; solve the remaining quadratic
        x1, x2 = roots([1, a, b], multiple=True)
        return [x1, S.Zero, x2]

    # depressed cubic t**3 + p*t + q via x = t - a/3
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27

    pon3 = p/3
    aon3 = a/3

    u1 = None
    if p is S.Zero:
        if q is S.Zero:
            return [-aon3]*3
        # pick a real cube root of -q when its sign is known
        if q.is_real:
            if q.is_positive:
                u1 = -root(q, 3)
            elif q.is_negative:
                u1 = root(-q, 3)
    elif q is S.Zero:
        # t*(t**2 + p): zero plus a quadratic pair
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    elif q.is_real and q.is_negative:
        u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)

    coeff = I*sqrt(3)/2
    if u1 is None:
        # no convenient real cube root found: fall back to the
        # general formula with the principal cube root C
        u1 = S(1)
        u2 = -S.Half + coeff
        u3 = -S.Half - coeff
        a, b, c, d = S(1), a, b, c
        D0 = b**2 - 3*a*c
        D1 = 2*b**3 - 9*a*b*c + 27*a**2*d
        C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)
        return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]]

    # the three cube roots of unity times u1
    u2 = u1*(-S.Half + coeff)
    u3 = u1*(-S.Half - coeff)

    if p is S.Zero:
        return [u1 - aon3, u2 - aon3, u3 - aon3]

    # Cardano: x_k = -u_k + p/(3*u_k) - a/3
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]

    return soln
def _roots_quartic_euler(p, q, r, a):
    """
    Descartes-Euler solution of the quartic equation

    Parameters
    ==========

    p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
    a: shift of the roots

    Notes
    =====

    This is a helper function for ``roots_quartic``.

    Look for solutions of the form ::

      ``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
      ``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
      ``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
      ``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``

    To satisfy the quartic equation one must have
    ``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
    so that ``R`` must satisfy the Descartes-Euler resolvent equation
    ``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``

    If the resolvent does not have a rational solution, return None;
    in that case it is likely that the Ferrari method gives a simpler
    solution.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.polys.polyroots import _roots_quartic_euler
    >>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125
    >>> _roots_quartic_euler(p, q, r, S(0))[0]
    -sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
    """
    # solve the resolvent equation
    x = Symbol('x')
    eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
    xsols = list(roots(Poly(eq, x), cubics=False).keys())
    # only rational resolvent roots keep the final expressions simple
    xsols = [sol for sol in xsols if sol.is_rational]
    if not xsols:
        return None
    # choose the largest rational solution for R
    R = max(xsols)
    c1 = sqrt(R)
    B = -q*c1/(4*R)
    A = -R - p/2
    c2 = sqrt(A + B)
    c3 = sqrt(A - B)
    # the four root expressions from the Notes above, shifted by -a
    return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.

    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.

    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.

    Although no general solution that is always applicable for all
    coefficients is known to this reviewer, certain conditions are tested
    to determine the simplest 4 expressions that can be returned:

      1) `f = c + a*(a**2/8 - b/2) == 0`
      2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
      3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
        a) `p == 0`
        b) `p != 0`

    Examples
    ========

    >>> from sympy import Poly, symbols, I
    >>> from sympy.polys.polyroots import roots_quartic

    >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))

    >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
    >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
    ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']

    References
    ==========

    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation
    7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
    """
    _, a, b, c, d = f.monic().all_coeffs()

    if not d:
        # x is a factor; the rest is a cubic
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # quasisymmetric case [6]: reduce to two quadratics
        x, m = f.gen, c/a
        g = Poly(x**2 + a*x + b - 2*m, x)
        z1, z2 = roots_quadratic(g)
        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)
        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)
        return r1 + r2
    else:
        # depressed quartic y**4 + e*y**2 + f*y + g via x = y - a/4
        a2 = a**2
        e = b - 3*a2/8
        f = c + a*(a2/8 - b/2)
        g = d - a*(a*(3*a2/256 - b/16) + c/4)
        aon4 = a/4

        if f is S.Zero:
            # biquadratic: solve the quadratic in y**2
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple=True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y is a factor of the depressed quartic
            y = [S.Zero] + roots([1, 0, e, f], multiple=True)
            return [tmp - aon4 for tmp in y]
        else:
            # Descartes-Euler method, see [7]
            sols = _roots_quartic_euler(e, f, g, aon4)
            if sols:
                return sols
            # Ferrari method, see [1, 2]
            a2 = a**2
            e = b - 3*a2/8
            f = c + a*(a2/8 - b/2)
            g = d - a*(a*(3*a2/256 - b/16) + c/4)
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)

            def _ans(y):
                # four roots obtained from one resolvent-cubic root y
                w = sqrt(e + 2*y)
                arg1 = 3*e + 2*y
                arg2 = 2*f/w
                ans = []
                for s in [-1, 1]:
                    root = sqrt(-(arg1 + s*arg2))
                    for t in [-1, 1]:
                        ans.append((s*w - t*root)/2 - aon4)
                return ans

            # p == 0 case
            y1 = -5*e/6 - q**TH
            if p.is_zero:
                return _ans(y1)

            # if p != 0 then u below is not 0
            root = sqrt(q**2/4 + p**3/27)
            r = -q/2 + root  # or -q/2 - root
            u = r**TH  # primary root of solve(x**3 - r, x)
            y2 = -5*e/6 + u - p/u/3
            if fuzzy_not(p.is_zero):
                return _ans(y2)

            # sort it out once they know the values of the coefficients
            return [Piecewise((a1, Eq(p, 0)), (a2, True))
                    for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    n = f.degree()

    a, b = f.nth(n), f.nth(0)
    base = -cancel(b/a)
    alpha = root(base, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    # define some parameters that will allow us to order the roots.
    # If the domain is ZZ this is guaranteed to return roots sorted
    # with reals before non-real roots and non-real sorted according
    # to real part and imaginary part, e.g. -1, 1, -1 + I, 2 - I
    neg = base.is_negative
    even = n % 2 == 0
    if neg:
        if even == True and (base + 1).is_positive:
            big = True
        else:
            big = False

    # get the indices in the right order so the computed
    # roots will be sorted when the domain is ZZ
    ks = []
    imax = n//2
    if even:
        ks.append(imax)
        imax -= 1
    if not neg:
        ks.append(0)
    for i in range(imax, 0, -1):
        if neg:
            ks.extend([i, -i])
        else:
            ks.extend([-i, i])
    if neg:
        ks.append(0)
        if big:
            for i in range(0, len(ks), 2):
                pair = ks[i: i + 2]
                # NOTE(review): the reversed pair is never written back
                # into ks, so this loop has no observable effect; confirm
                # whether ks[i: i + 2] = pair was intended here.
                pair = list(reversed(pair))

    # compute the roots: alpha times each n-th root of unity
    roots, d = [], 2*I*pi/n
    for k in ks:
        zeta = exp(k*d).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))

    return roots
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    Examples
    ========

    >>> from sympy.polys.polyroots import _inv_totient_estimate

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)

    """
    # Primes p with p - 1 dividing m: the only primes that may divide
    # a preimage of m under Euler's totient.
    candidates = [q + 1 for q in divisors(m) if isprime(q + 1)]

    prod_p, prod_p1 = 1, 1
    for q in candidates:
        prod_p *= q
        prod_p1 *= q - 1

    lower = m
    upper = int(math.ceil(m*(float(prod_p)/prod_p1)))

    # Tighten the upper bound using an initial segment of the primes
    # whose product stays within the first estimate.
    prefix_prod = q = 2
    prefix = []
    while prefix_prod <= upper:
        q = nextprime(q)
        prefix.append(q)
        prefix_prod *= q
    prefix_prod //= q

    denom = 1
    for q in prefix[:-1]:
        denom *= q - 1

    upper = int(math.ceil(m*(float(prefix_prod)/denom)))

    return lower, upper
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials. """
    # the index n of Phi_n is bracketed by the inverse-totient estimate
    L, U = _inv_totient_estimate(f.degree())

    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)

        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")

    roots = []

    if not factor:
        # get the indices in the right order so the computed
        # roots will be sorted
        h = n//2
        ks = [i for i in range(1, n + 1) if igcd(i, n) == 1]
        ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1))
        d = 2*I*pi/n
        for k in reversed(ks):
            roots.append(exp(k*d).expand(complex=True))
    else:
        # factor over an extension containing a primitive root of -1,
        # then read each root off its linear factor's trailing coefficient
        g = Poly(f, extension=root(-1, n))

        for h, _ in ordered(g.factor_list()[1]):
            roots.append(-h.TC())

    return roots
def roots_quintic(f):
    """
    Calculate exact roots of a solvable quintic.

    Returns a list with the five roots when ``f`` is an irreducible
    quintic of the form ``x**5 + p*x**3 + q*x**2 + r*x + s`` (rational
    coefficients, no ``x**4`` term) that is solvable by radicals;
    otherwise returns an empty list so the caller can fall back to
    other methods.
    """
    result = []
    coeff_5, coeff_4, p, q, r, s = f.all_coeffs()

    # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
    if coeff_4:
        return result

    if coeff_5 != 1:
        l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
        if not all(coeff.is_Rational for coeff in l):
            return result
        f = Poly(f/coeff_5)
    quintic = PolyQuintic(f)

    # Eqn standardized. Algo for solving starts here
    if not f.is_irreducible:
        return result

    f20 = quintic.f20
    # Check if f20 has linear factors over domain Z
    if f20.is_irreducible:
        return result

    # Now, we know that f is solvable
    for _factor in f20.factor_list()[1]:
        if _factor[0].is_linear:
            theta = _factor[0].root(0)
            break
    d = discriminant(f)
    delta = sqrt(d)
    # zeta = a fifth root of unity
    zeta1, zeta2, zeta3, zeta4 = quintic.zeta
    T = quintic.T(theta, d)
    tol = S(1e-10)
    alpha = T[1] + T[2]*delta
    alpha_bar = T[1] - T[2]*delta
    beta = T[3] + T[4]*delta
    beta_bar = T[3] - T[4]*delta

    disc = alpha**2 - 4*beta
    disc_bar = alpha_bar**2 - 4*beta_bar

    l0 = quintic.l0(theta)

    l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
    l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))

    l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
    l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))

    order = quintic.order(theta, d)
    test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
    # Comparing floats
    if not comp(test, 0, tol):
        l2, l3 = l3, l2

    # Now we have correct order of l's
    R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
    R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
    R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
    R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4

    Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    sol = Symbol('sol')

    # Simplifying improves performance a lot for exact expressions
    R1 = _quintic_simplify(R1)
    R2 = _quintic_simplify(R2)
    R3 = _quintic_simplify(R3)
    R4 = _quintic_simplify(R4)

    # Solve imported here. Causing problems if imported as 'solve'
    # and hence the changed name
    from sympy.solvers.solvers import solve as _solve
    a, b = symbols('a b', cls=Dummy)
    _sol = _solve( sol**5 - a - I*b, sol)
    for i in range(5):
        _sol[i] = factor(_sol[i])
    R1 = R1.as_real_imag()
    R2 = R2.as_real_imag()
    R3 = R3.as_real_imag()
    R4 = R4.as_real_imag()

    for i, root in enumerate(_sol):
        Res[1][i] = _quintic_simplify(root.subs({ a: R1[0], b: R1[1] }))
        Res[2][i] = _quintic_simplify(root.subs({ a: R2[0], b: R2[1] }))
        Res[3][i] = _quintic_simplify(root.subs({ a: R3[0], b: R3[1] }))
        Res[4][i] = _quintic_simplify(root.subs({ a: R4[0], b: R4[1] }))

    for i in range(1, 5):
        for j in range(5):
            Res_n[i][j] = Res[i][j].n()
            Res[i][j] = _quintic_simplify(Res[i][j])
    r1 = Res[1][0]
    r1_n = Res_n[1][0]

    # pick r4 so that r1*r4 is (numerically) real
    for i in range(5):
        if comp(im(r1_n*Res_n[4][i]), 0, tol):
            r4 = Res[4][i]
            break

    # Now we have various Res values. Each will be a list of five
    # values. We have to pick one r value from those five for each Res
    u, v = quintic.uv(theta, d)
    testplus = (u + v*delta*sqrt(5)).n()
    testminus = (u - v*delta*sqrt(5)).n()

    # Evaluated numbers suffixed with _n
    # We will use evaluated numbers for calculation. Much faster.
    r4_n = r4.n()
    r2 = r3 = None

    for i in range(5):
        r2temp_n = Res_n[2][i]
        for j in range(5):
            # Again storing away the exact number and using
            # evaluated numbers in computations
            r3temp_n = Res_n[3][j]
            if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and
                    comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):
                r2 = Res[2][i]
                r3 = Res[3][j]
                break
        if r2:
            break

    # Now, we have r's so we can get roots
    x1 = (r1 + r2 + r3 + r4)/5
    x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
    x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
    x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
    x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
    result = [x1, x2, x3, x4, x5]

    # Now check if solutions are distinct
    saw = set()
    for r in result:
        r = r.n(2)
        if r in saw:
            # Roots were identical. Abort, return []
            # and fall back to usual solve
            return []
        saw.add(r)
    return result
def _quintic_simplify(expr):
    """Canonicalize a quintic-solver intermediate expression.

    Applies powsimp, then cancel, then together — the same pipeline the
    quintic solver relies on to keep radicals compact.
    """
    return together(cancel(powsimp(expr)))
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.

    Returns the integer ``div`` such that substituting ``x = div*y``
    ``p(x) = m*q(y)`` where the coefficients of ``q`` are smaller
    than those of ``p``.

    For example ``x**5 + 512*x + 1024 = 0``
    with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``

    Returns the integer ``div`` or ``None`` if there is no possible scaling.

    Examples
    ========

    >>> from sympy.polys import Poly
    >>> from sympy.abc import x
    >>> from sympy.polys.polyroots import _integer_basis
    >>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ')
    >>> _integer_basis(p)
    4
    """
    monoms, coeffs = list(zip(*poly.terms()))

    # flatten the univariate exponent tuples and drop coefficient signs
    monoms, = list(zip(*monoms))
    coeffs = list(map(abs, coeffs))

    # scaling only helps when the low-order coefficients dominate;
    # reverse so coefficients pair with exponents counted from x**0
    if coeffs[0] < coeffs[-1]:
        coeffs = list(reversed(coeffs))
        n = monoms[0]
        monoms = [n - i for i in reversed(monoms)]
    else:
        return None

    # the leading term (now last) imposes no divisibility constraint
    monoms = monoms[:-1]
    coeffs = coeffs[:-1]

    # candidate divisors of gcd(coeffs), tried from largest to smallest
    divs = reversed(divisors(gcd_list(coeffs))[1:])

    try:
        div = next(divs)
    except StopIteration:
        return None

    while True:
        # div is acceptable iff div**monom divides each coefficient;
        # the for-else fires only when no term rejected div
        for monom, coeff in zip(monoms, coeffs):
            if coeff % div**monom != 0:
                try:
                    div = next(divs)
                except StopIteration:
                    return None
                else:
                    break
        else:
            return div
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``.

    Returns ``(coeff, poly)`` such that the roots of the original
    polynomial are ``coeff`` times the roots of the returned one.
    """
    coeff = S.One

    try:
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        # cannot clear denominators in this domain; give up gracefully
        return coeff, poly

    poly = poly.primitive()[1]
    poly = poly.retract()

    # TODO: This is fragile. Figure out how to make this independent of construct_domain().
    if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):
        poly = poly.inject()

        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])

        # exponents of the main generator vs each coefficient generator
        base, strips = strips[0], strips[1:]

        for gen, strip in zip(list(gens), strips):
            reverse = False

            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True

            ratio = None

            # gen can be eliminated only if its exponents are a constant
            # multiple of the main generator's exponents
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a

                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                # uniform ratio found: substitute gen = 1 and fold the
                # scaling into coeff
                if reverse:
                    ratio = -ratio

                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)

        if gens:
            poly = poly.eject(*gens)

    # over ZZ, additionally try an integer rescaling x = basis*y
    if poly.is_univariate and poly.get_domain().is_ZZ:
        basis = _integer_basis(poly)

        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis

    return coeff, poly
@public
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned.  To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively. If cubic
    roots are real but are expressed in terms of complex numbers
    (casus irreducibilis [1]) the ``trig`` flag can be set to True to
    have the solutions returned in terms of cosine and inverse cosine
    functions.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots.  However to get a list containing all
    those roots set the ``multiple`` flag to True; the list will
    have identical roots appearing next to each other in the result.
    (For a given Poly, the all_roots method will give the roots in
    sorted numerical order.)

    Examples
    ========

    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-y, x, y)

    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}


    References
    ==========

    1. http://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method

    """
    from sympy.polys.polytools import to_rational_coeffs
    flags = dict(flags)

    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    trig = flags.pop('trig', False)
    quartics = flags.pop('quartics', True)
    quintics = flags.pop('quintics', False)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        # a plain coefficient list: build a Poly in a fresh dummy symbol
        if gens:
            raise ValueError('redundant generators given')

        x = Dummy('x')

        poly, i = {}, len(f) - 1

        for coeff in f:
            poly[i], i = sympify(coeff), i - 1

        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
            # BUGFIX: ``f.length`` is a bound method, so the original
            # ``f.length == 2`` was always False and this branch was dead;
            # the call form matches f.length() used later in this routine.
            if f.length() == 2 and f.degree() != 1:
                # check for foo**n factors in the constant
                n = f.degree()
                npow_bases = []
                # BUGFIX: collect the non-power factors of the constant;
                # ``others`` was previously referenced but never defined.
                others = []
                expr = f.as_expr()
                con = expr.as_independent(*gens)[0]
                for p in Mul.make_args(con):
                    if p.is_Pow and not p.exp % n:
                        npow_bases.append(p.base**(p.exp/n))
                    else:
                        others.append(p)
                if npow_bases:
                    b = Mul(*npow_bases)
                    B = Dummy()
                    d = roots(Poly(expr - con + B**n*Mul(*others), *gens,
                        **flags), *gens, **flags)
                    rv = {}
                    for k, v in d.items():
                        rv[k.subs(B, b)] = v
                    return rv

        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # accumulate multiplicity k for root in the result dict
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []

        for root in _try_heuristics(factors[0]):
            roots.append(root)

        for factor in factors[1:]:
            previous, roots = list(roots), []

            for root in previous:
                g = factor - Poly(root, f.gen)

                for root in _try_heuristics(g):
                    roots.append(root)

        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            return [S(0)]*f.degree()

        if f.length() == 2:
            if f.degree() == 1:
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)

        result = []

        # peel off a rational root at +/-1 if present
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break

        n = f.degree()

        if n == 1:
            result += list(map(cancel, roots_linear(f)))
        elif n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f, trig=trig)
        elif n == 4 and quartics:
            result += roots_quartic(f)
        elif n == 5 and quintics:
            result += roots_quintic(f)

        return result

    # pull out a power of the generator: it contributes zero roots
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {S(0): k}

    coeff, f = preprocess_roots(f)

    if auto and f.get_domain().has_Ring:
        f = f.to_field()

    rescale_x = None
    translate_x = None

    result = {}

    if not f.is_ground:
        if not f.get_domain().is_Exact:
            # inexact (e.g. float) coefficients: use numerical roots
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.length() == 2:
            roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial
            for r in roots_fun(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()
            if len(factors) == 1 and f.degree() == 2:
                for r in roots_quadratic(f):
                    _update_dict(result, r, 1)
            else:
                if len(factors) == 1 and factors[0][1] == 1:
                    if f.get_domain().is_EX:
                        # try to rewrite with rational coefficients first
                        res = to_rational_coeffs(f)
                        if res:
                            if res[0] is None:
                                translate_x, f = res[2:]
                            else:
                                rescale_x, f = res[1], res[-1]
                            result = roots(f)
                            if not result:
                                for root in _try_decompose(f):
                                    _update_dict(result, root, 1)
                    else:
                        for root in _try_decompose(f):
                            _update_dict(result, root, 1)
                else:
                    for factor, k in factors:
                        for r in _try_heuristics(Poly(factor, f.gen, field=True)):
                            _update_dict(result, r, k)

    if coeff is not S.One:
        _result, result, = result, {}

        for root, k in _result.items():
            result[coeff*root] = k

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)

        for zero in dict(result).keys():
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in dict(result).keys():
            if not predicate(zero):
                del result[zero]
    if rescale_x:
        result1 = {}
        for k, v in result.items():
            result1[k*rescale_x] = v
        result = result1
    if translate_x:
        result1 = {}
        for k, v in result.items():
            result1[k + translate_x] = v
        result = result1

    # BUGFIX: add zero roots only after the non-trivial roots have been
    # rescaled/translated, otherwise S.Zero would be wrongly shifted too
    # (this matches the corrected upstream ordering).
    result.update(zeros)

    if not multiple:
        return result
    else:
        zeros = []

        for zero in ordered(result):
            zeros.extend([zero]*result[zero])

        return zeros
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]

    """
    args = dict(args)
    filter = args.pop('filter', None)

    F = Poly(f, *gens, **args)

    if not F.is_Poly:
        # not convertible to a polynomial: nothing to split
        return [f]

    if F.is_multivariate:
        raise ValueError('multivariate polynomials are not supported')

    gen = F.gens[0]
    zeros = roots(F, filter=filter)

    if not zeros:
        factors = [F]
    else:
        factors = []
        covered = 0

        # one linear factor per root, repeated according to multiplicity
        for zero, mult in ordered(zeros.items()):
            factors.extend([Poly(gen - zero, gen)]*mult)
            covered += mult

        # whatever degree the radical roots did not account for stays
        # together as a single residual factor
        if covered < F.degree():
            product = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(product))

    # expressions in, expressions out
    if not isinstance(f, Poly):
        factors = [fac.as_expr() for fac in factors]

    return factors
| {
"repo_name": "Arafatk/sympy",
"path": "sympy/polys/polyroots.py",
"copies": "31",
"size": "32388",
"license": "bsd-3-clause",
"hash": 8339600344904056000,
"line_mean": 27.969588551,
"line_max": 92,
"alpha_frac": 0.5136779054,
"autogenerated": false,
"ratio": 3.233626198083067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004857800024329153,
"num_lines": 1118
} |
"""Algorithms for computing symbolic roots of polynomials. """
from sympy.core.symbol import Dummy
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core import S, I, Basic
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt, re, im
from sympy.polys.polytools import Poly, cancel, factor, gcd_list
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded, DomainError
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
from sympy.core.compatibility import reduce
import math
def roots_linear(f):
    """Returns a list of roots of a linear polynomial."""
    dom = f.get_domain()
    res = -f.nth(0)/f.nth(1)

    if not dom.is_Numerical:
        # symbolic coefficients: clean up the quotient
        res = factor(res) if dom.is_Composite else simplify(res)

    return [res]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial."""
    a, b, c = f.all_coeffs()
    dom = f.get_domain()

    def _simplify(expr):
        # composite domains factor more cleanly than generic simplify()
        return factor(expr) if dom.is_Composite else simplify(expr)

    if c is S.Zero:
        # f = x*(a*x + b): zero plus -b/a
        first, second = S.Zero, -b/a
        if not dom.is_Numerical:
            second = _simplify(second)
    elif b is S.Zero:
        # f = a*x**2 + c: symmetric pair +/- sqrt(-c/a)
        ratio = -c/a
        if dom.is_Numerical:
            pm = sqrt(ratio)
        else:
            pm = sqrt(_simplify(ratio))
        first, second = pm, -pm
    else:
        # quadratic formula
        disc = b**2 - 4*a*c
        if dom.is_Numerical:
            rad = sqrt(disc)
            first = (-b + rad) / (2*a)
            second = (-b - rad) / (2*a)
        else:
            rad = sqrt(_simplify(disc))
            twice_a = 2*a
            center = _simplify(-b/twice_a)
            offset = rad/twice_a
            first = center + offset
            second = center - offset

    return sorted([first, second], key=default_sort_key)
def roots_cubic(f):
    """Returns a list of roots of a cubic polynomial."""
    _, a, b, c = f.monic().all_coeffs()

    if c is S.Zero:
        # x is a factor; solve the remaining quadratic
        x1, x2 = roots([1,a,b], multiple = True)
        return [x1, S.Zero, x2]

    # depressed cubic t**3 + p*t + q via x = t - a/3
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27

    pon3 = p/3
    aon3 = a/3

    if p is S.Zero:
        if q is S.Zero:
            # triple root
            return [-aon3]*3
        else:
            u1 = q**Rational(1, 3)
    elif q is S.Zero:
        # t*(t**2 + p): zero plus a quadratic pair
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    else:
        # Cardano: u1 is a cube root of q/2 + sqrt(q**2/4 + (p/3)**3)
        u1 = (q/2 + sqrt(q**2/4 + pon3**3))**Rational(1, 3)

    coeff = S.ImaginaryUnit*sqrt(3)/2

    # the other two cube roots: u1 times the primitive cube roots of unity
    u2 = u1*(-S.Half + coeff)
    u3 = u1*(-S.Half - coeff)

    # x_k = -u_k + p/(3*u_k) - a/3
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]

    return soln
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.

    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.

    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.

    Although there is a general solution, simpler results can be obtained for
    certain values of the coefficients. In all cases, 4 roots are returned:

      1) `f = c + a*(a**2/8 - b/2) == 0`
      2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
      3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
        a) `p == 0`
        b) `p != 0`

    **Examples**

    >>> from sympy import Poly, symbols, I
    >>> from sympy.polys.polyroots import roots_quartic

    >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))

    >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
    >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
    ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']

    **References**

    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation
    """
    _, a, b, c, d = f.monic().all_coeffs()

    if not d:
        # x is a factor; the rest is a cubic
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # quasisymmetric case [6]: reduce to two quadratics
        x, m = f.gen, c/a
        g = Poly(x**2 + a*x + b - 2*m, x)
        z1, z2 = roots_quadratic(g)
        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)
        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)
        return r1 + r2
    else:
        # depressed quartic y**4 + e*y**2 + f*y + g via x = y - a/4
        a2 = a**2
        e = b - 3*a2/8
        f = c + a*(a2/8 - b/2)
        g = d - a*(a*(3*a2/256 - b/16) + c/4)
        aon4 = a/4
        ans = []

        if f is S.Zero:
            # biquadratic: quadratic in y**2
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple = True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y is a factor of the depressed quartic
            y = [S.Zero] + roots([1, 0, e, f], multiple = True)
            return [tmp - aon4 for tmp in y]
        else:
            # Ferrari method: find a root y of the resolvent cubic
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)
            if p is S.Zero:
                y = -5*e/6 - q**TH
            else:
                # with p !=0 then u below is not 0
                root = sqrt(q**2/4 + p**3/27)
                r = -q/2 + root  # or -q/2 - root
                u = r**TH  # primary root of solve(x**3-r, x)
                y = -5*e/6 + u - p/u/3
            w = sqrt(e + 2*y)
            arg1 = 3*e + 2*y
            arg2 = 2*f/w
            # the four sign combinations give the four roots
            for s in [-1, 1]:
                root = sqrt(-(arg1 + s*arg2))
                for t in [-1, 1]:
                    ans.append((s*w - t*root)/2 - aon4)
            return ans
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial.

    For ``a*x**n + b`` the roots are ``alpha*zeta`` where ``alpha`` is the
    principal n-th root of ``-b/a`` and ``zeta`` ranges over the n-th
    roots of unity.
    """
    n = f.degree()

    a, b = f.nth(n), f.nth(0)
    # principal n-th root of -b/a
    alpha = (-cancel(b/a))**Rational(1, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    roots, I = [], S.ImaginaryUnit

    # range() instead of Python 2-only xrange() for 2/3 compatibility
    for k in range(n):
        zeta = exp(2*k*S.Pi*I/n).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))

    return sorted(roots, key=default_sort_key)
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    **Examples**

    >>> from sympy.polys.polyroots import _inv_totient_estimate

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)

    """
    # Only primes p with (p - 1) | m can divide any n with totient(n) == m.
    primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]
    a, b = 1, 1
    # a/b = product(p)/product(p - 1) over those primes gives a first bound
    for p in primes:
        a *= p
        b *= p - 1
    L = m
    U = int(math.ceil(m*(float(a)/b)))
    # Tighten U: accumulate the product P of successive primes until it
    # exceeds the current bound.
    P = p = 2
    primes = []
    while P <= U:
        p = nextprime(p)
        primes.append(p)
        P *= p
    # The loop overshoots by exactly one prime; remove it from the product.
    P //= p
    b = 1
    # Recompute the (p - 1) product without the last retained prime.
    for p in primes[:-1]:
        b *= p - 1
    U = int(math.ceil(m*(float(P)/b)))
    return L, U
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials.

    Locates ``n`` with ``f == cyclotomic_poly(n)`` via totient bounds,
    then returns either the primitive n-th roots of unity directly or,
    with ``factor=True``, the roots read off a factorization over the
    cyclotomic extension.
    """
    L, U = _inv_totient_estimate(f.degree())

    # range() instead of Python 2-only xrange() for 2/3 compatibility
    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)

        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")

    roots = []

    if not factor:
        # primitive n-th roots of unity: exp(2*pi*I*k/n) with gcd(k, n) == 1
        for k in range(1, n + 1):
            if igcd(k, n) == 1:
                roots.append(exp(2*k*S.Pi*I/n).expand(complex=True))
    else:
        # each linear factor h over the extension contributes root -h.TC()
        g = Poly(f, extension=(-1)**Rational(1, n))

        for h, _ in g.factor_list()[1]:
            roots.append(-h.TC())

    return sorted(roots, key=default_sort_key)
def roots_rational(f):
    """Returns a list of rational roots of a polynomial."""
    domain = f.get_domain()

    # Normalize to integer coefficients over QQ so candidates can be
    # tested exactly; give up on other domains.
    if domain.is_QQ:
        _, f = f.clear_denoms()
    elif domain.is_ZZ:
        f = f.set_domain('QQ')
    else:
        return []

    lead_divs = divisors(int(f.LC()))
    trail_divs = divisors(int(f.EC()))

    # zero is a root iff the polynomial vanishes at it
    zeros = [S.Zero] if not f.eval(S.Zero) else []

    # Candidate roots are +/- ratios of divisors of the extreme
    # coefficients; keep each candidate that makes f vanish.
    for num in lead_divs:
        for den in trail_divs:
            candidate = Rational(num, den)

            if not f.eval(candidate):
                zeros.append(candidate)

            if not f.eval(-candidate):
                zeros.append(-candidate)

    return sorted(zeros, key=default_sort_key)
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.

    Returns the largest integer ``div`` such that every coefficient is
    divisible by ``div`` raised to the corresponding exponent (so the
    substitution ``x -> div*x`` keeps coefficients integral), or ``None``
    if no non-trivial scaling exists.
    """
    monoms, coeffs = zip(*poly.terms())
    monoms, = zip(*monoms)
    # list() so the result is indexable/reversible on Python 3, where
    # map() returns an iterator
    coeffs = list(map(abs, coeffs))

    # only worthwhile when the trailing coefficient dominates the leading one
    if coeffs[0] < coeffs[-1]:
        coeffs = list(reversed(coeffs))
    else:
        return None

    monoms = monoms[:-1]
    coeffs = coeffs[:-1]

    # candidate divisors of the coefficient gcd, largest first; skip 1
    divs = reversed(divisors(gcd_list(coeffs))[1:])

    try:
        # builtin next() instead of Python 2-only .next()
        div = next(divs)
    except StopIteration:
        return None

    while True:
        for monom, coeff in zip(monoms, coeffs):
            if coeff % div**monom != 0:
                # current candidate fails; try the next smaller divisor
                try:
                    div = next(divs)
                except StopIteration:
                    return None
                else:
                    break
        else:
            return div
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``.

    Returns ``(coeff, poly)`` such that the roots of the input polynomial
    are ``coeff`` times the roots of the returned polynomial.
    """
    coeff = S.One

    try:
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        return coeff, poly

    poly = poly.primitive()[1]
    poly = poly.retract()

    # Strip symbolic generators that enter all coefficients with a
    # uniform exponent ratio; their effect is a simple root rescaling.
    if poly.get_domain().is_Poly and all(c.is_monomial for c in poly.rep.coeffs()):
        poly = poly.inject()

        # list() so strips is indexable/sliceable on Python 3, where
        # zip() returns an iterator
        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])

        base, strips = strips[0], strips[1:]

        for gen, strip in zip(list(gens), strips):
            reverse = False

            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True

            ratio = None

            # the generator can be eliminated only if its exponents are a
            # constant multiple of the main generator's exponents
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a

                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                if reverse:
                    ratio = -ratio

                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)

        if gens:
            poly = poly.eject(*gens)

    # Over ZZ, rescale x -> basis*x to shrink integer coefficients.
    if poly.is_univariate and poly.get_domain().is_ZZ:
        basis = _integer_basis(poly)

        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis

    return coeff, poly
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned. To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots. However to get a tuple containing all
    those roots set the ``multiple`` flag to True.

    **Examples**

    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-y, x, y)
    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}

    """
    flags = dict(flags)

    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    quartics = flags.pop('quartics', True)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        if gens:
            raise ValueError('redundant generators given')

        x = Dummy('x')

        # build a dense coefficient mapping {degree: coeff}
        poly, i = {}, len(f) - 1

        for coeff in f:
            poly[i], i = sympify(coeff), i - 1

        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # accumulate multiplicities of repeated roots
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []

        for root in _try_heuristics(factors[0]):
            roots.append(root)

        # compose back: roots of g(h(x)) come from solving h(x) = r for
        # each root r of the outer factor
        for factor in factors[1:]:
            previous, roots = list(roots), []

            for root in previous:
                g = factor - Poly(root, f.gen)

                for root in _try_heuristics(g):
                    roots.append(root)

        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            return [S(0)]*f.degree()

        if f.length() == 2:
            if f.degree() == 1:
                # list() so a concrete list is returned on Python 3,
                # where map() yields an iterator
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)

        result = []

        # peel off an obvious root at -1 or +1 if present
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break

        n = f.degree()

        if n == 1:
            result += list(map(cancel, roots_linear(f)))
        elif n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f)
        elif n == 4 and quartics:
            result += roots_quartic(f)

        return result

    # factor out x**k and record zero as a root of multiplicity k
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {S(0): k}

    coeff, f = preprocess_roots(f)

    if auto and f.get_domain().has_Ring:
        f = f.to_field()

    result = {}

    if not f.is_ground:
        if not f.get_domain().is_Exact:
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.degree() == 2:
            for r in roots_quadratic(f):
                _update_dict(result, r, 1)
        elif f.length() == 2:
            for r in roots_binomial(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()

            if len(factors) == 1 and factors[0][1] == 1:
                for root in _try_decompose(f):
                    _update_dict(result, root, 1)
            else:
                for factor, k in factors:
                    for r in _try_heuristics(Poly(factor, f.gen, field=True)):
                        _update_dict(result, r, k)

    # undo the rescaling introduced by preprocess_roots
    if coeff is not S.One:
        _result, result = result, {}

        # .items() instead of Python 2-only .iteritems()
        for root, k in _result.items():
            result[coeff*root] = k

    result.update(zeros)

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)

        # iterate over a snapshot of the keys so deletion is safe
        for zero in list(result):
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in list(result):
            if not predicate(zero):
                del result[zero]

    if not multiple:
        return result
    else:
        zeros = []

        for zero, k in result.items():
            zeros.extend([zero]*k)

        return sorted(zeros, key=default_sort_key)
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    **Examples**

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2-y, x)
    [x - sqrt(y), x + sqrt(y)]

    """
    args = dict(args)
    filter = args.pop('filter', None)

    F = Poly(f, *gens, **args)

    if not F.is_Poly:
        return [f]

    if F.is_multivariate:
        raise ValueError('multivariate polynomials not supported')

    x = F.gens[0]
    zeros = roots(F, filter=filter)

    if not zeros:
        factors = [F]
    else:
        factors, N = [], 0

        # one linear factor (x - r) per root, repeated per multiplicity;
        # .items() instead of Python 2-only .iteritems()
        for r, n in zeros.items():
            factors, N = factors + [Poly(x - r, x)]*n, N + n

        if N < F.degree():
            # roots did not account for the full degree: append the
            # remaining cofactor
            G = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(G))

    # mirror the input type: expressions in, expressions out
    if not isinstance(f, Poly):
        return [f.as_expr() for f in factors]
    else:
        return factors
| {
"repo_name": "Cuuuurzel/KiPyCalc",
"path": "sympy_old/polys/polyroots.py",
"copies": "2",
"size": "18242",
"license": "mit",
"hash": -8679868022038474000,
"line_mean": 25.06,
"line_max": 91,
"alpha_frac": 0.5099769762,
"autogenerated": false,
"ratio": 3.3781481481481483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9873481513986424,
"avg_score": 0.0029287220723449086,
"num_lines": 700
} |
"""Algorithms for computing symbolic roots of polynomials. """
from sympy.core.symbol import Dummy
from sympy.core import S, I
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt
from sympy.polys.polytools import Poly, cancel, factor, gcd_list
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded, DomainError
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
from sympy.core.compatibility import reduce
import math
def roots_linear(f):
    """Returns a list of roots of a linear polynomial."""
    # a*x + b  ->  -b/a
    root = -f.nth(0)/f.nth(1)
    dom = f.get_domain()

    if not dom.is_Numerical:
        # tidy symbolic results: factor over composite domains,
        # simplify otherwise
        root = factor(root) if dom.is_Composite else simplify(root)

    return [root]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial."""
    a, b, c = f.all_coeffs()
    dom = f.get_domain()

    def _simplify(expr):
        # factor over composite domains, simplify otherwise
        return factor(expr) if dom.is_Composite else simplify(expr)

    if c is S.Zero:
        # x*(a*x + b): roots are 0 and -b/a
        x1, x2 = S.Zero, -b/a

        if not dom.is_Numerical:
            x2 = _simplify(x2)
    elif b is S.Zero:
        # a*x**2 + c: roots are +/- sqrt(-c/a)
        rhs = -c/a

        if not dom.is_Numerical:
            w = sqrt(_simplify(rhs))
        else:
            w = sqrt(rhs)

        x1 = w
        x2 = -w
    else:
        # general case: quadratic formula
        disc = b**2 - 4*a*c

        if dom.is_Numerical:
            sd = sqrt(disc)

            x1 = (-b + sd) / (2*a)
            x2 = (-b - sd) / (2*a)
        else:
            sd = sqrt(_simplify(disc))
            twice_a = 2*a

            base = _simplify(-b/twice_a)
            offset = sd/twice_a

            x1 = base + offset
            x2 = base - offset

    return sorted([x1, x2], key=default_sort_key)
def roots_cubic(f):
    """Returns a list of roots of a cubic polynomial.

    Uses Cardano's method: the polynomial is made monic, depressed by
    the shift ``x -> x - a/3``, and the roots are recovered from one
    cube root ``u1`` and its rotations by the primitive cube roots of
    unity.
    """
    _, a, b, c = f.monic().all_coeffs()
    if c is S.Zero:
        # zero constant term: x divides f; solve the remaining quadratic
        x1, x2 = roots([1,a,b], multiple = True)
        return [x1, S.Zero, x2]
    # depressed cubic y**3 + p*y + q with x = y - a/3
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27
    pon3 = p/3
    aon3 = a/3
    if p is S.Zero:
        if q is S.Zero:
            # p == q == 0: triple root at -a/3
            return [-aon3]*3
        else:
            u1 = q**Rational(1, 3)
    elif q is S.Zero:
        # q == 0: y*(y**2 + p) = 0
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    else:
        u1 = (q/2 + sqrt(q**2/4 + pon3**3))**Rational(1, 3)
    # rotate u1 by the primitive cube roots of unity
    coeff = S.ImaginaryUnit*sqrt(3)/2
    u2 = u1*(-S.Half + coeff)
    u3 = u1*(-S.Half - coeff)
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]
    return soln
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.

    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.

    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.

    Although there is a general solution, simpler results can be obtained for
    certain values of the coefficients. In all cases, 4 roots are returned:

        1) `f = c + a*(a**2/8 - b/2) == 0`
        2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
        3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
            a) `p == 0`
            b) `p != 0`

    Examples
    ========

    >>> from sympy import Poly, symbols, I
    >>> from sympy.polys.polyroots import roots_quartic

    >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))

    >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
    >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
    ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']

    References
    ==========

    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation

    """
    _, a, b, c, d = f.monic().all_coeffs()
    if not d:
        # zero constant term: x divides f; solve the remaining cubic
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # quasisymmetric case [6]: split into two quadratics
        x, m = f.gen, c/a
        g = Poly(x**2 + a*x + b - 2*m, x)
        z1, z2 = roots_quadratic(g)
        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)
        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)
        return r1 + r2
    else:
        # depressed quartic y**4 + e*y**2 + f*y + g with x = y - a/4
        a2 = a**2
        e = b - 3*a2/8
        f = c + a*(a2/8 - b/2)
        g = d - a*(a*(3*a2/256 - b/16) + c/4)
        aon4 = a/4
        ans = []
        if f is S.Zero:
            # biquadratic: solve the quadratic in y**2
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple = True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y divides the depressed quartic; solve the remaining cubic
            y = [S.Zero] + roots([1, 0, e, f], multiple = True)
            return [tmp - aon4 for tmp in y]
        else:
            # general case via the resolvent cubic
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)
            if p is S.Zero:
                y = -5*e/6 - q**TH
            else:
                # with p != 0 then u below is not 0
                root = sqrt(q**2/4 + p**3/27)
                r = -q/2 + root  # or -q/2 - root
                u = r**TH  # primary root of solve(x**3-r, x)
                y = -5*e/6 + u - p/u/3
            w = sqrt(e + 2*y)
            arg1 = 3*e + 2*y
            arg2 = 2*f/w
            # the four sign combinations give the four roots
            for s in [-1, 1]:
                root = sqrt(-(arg1 + s*arg2))
                for t in [-1, 1]:
                    ans.append((s*w - t*root)/2 - aon4)
        return ans
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial.

    For ``a*x**n + b`` the roots are ``alpha*zeta`` where ``alpha`` is the
    principal n-th root of ``-b/a`` and ``zeta`` ranges over the n-th
    roots of unity.
    """
    n = f.degree()

    a, b = f.nth(n), f.nth(0)
    # principal n-th root of -b/a
    alpha = (-cancel(b/a))**Rational(1, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    roots, I = [], S.ImaginaryUnit

    # range() instead of Python 2-only xrange() for 2/3 compatibility
    for k in range(n):
        zeta = exp(2*k*S.Pi*I/n).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))

    return sorted(roots, key=default_sort_key)
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    Examples
    ========

    >>> from sympy.polys.polyroots import _inv_totient_estimate

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)

    """
    # Only primes p with (p - 1) | m can divide any n with totient(n) == m.
    primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]
    a, b = 1, 1
    # a/b = product(p)/product(p - 1) over those primes gives a first bound
    for p in primes:
        a *= p
        b *= p - 1
    L = m
    U = int(math.ceil(m*(float(a)/b)))
    # Tighten U: accumulate the product P of successive primes until it
    # exceeds the current bound.
    P = p = 2
    primes = []
    while P <= U:
        p = nextprime(p)
        primes.append(p)
        P *= p
    # The loop overshoots by exactly one prime; remove it from the product.
    P //= p
    b = 1
    # Recompute the (p - 1) product without the last retained prime.
    for p in primes[:-1]:
        b *= p - 1
    U = int(math.ceil(m*(float(P)/b)))
    return L, U
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials.

    Locates ``n`` with ``f == cyclotomic_poly(n)`` via totient bounds,
    then returns either the primitive n-th roots of unity directly or,
    with ``factor=True``, the roots read off a factorization over the
    cyclotomic extension.
    """
    L, U = _inv_totient_estimate(f.degree())

    # range() instead of Python 2-only xrange() for 2/3 compatibility
    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)

        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")

    roots = []

    if not factor:
        # primitive n-th roots of unity: exp(2*pi*I*k/n) with gcd(k, n) == 1
        for k in range(1, n + 1):
            if igcd(k, n) == 1:
                roots.append(exp(2*k*S.Pi*I/n).expand(complex=True))
    else:
        # each linear factor h over the extension contributes root -h.TC()
        g = Poly(f, extension=(-1)**Rational(1, n))

        for h, _ in g.factor_list()[1]:
            roots.append(-h.TC())

    return sorted(roots, key=default_sort_key)
def roots_rational(f):
    """Returns a list of rational roots of a polynomial."""
    domain = f.get_domain()

    # Normalize to integer coefficients over QQ so candidates can be
    # tested exactly; give up on other domains.
    if domain.is_QQ:
        _, f = f.clear_denoms()
    elif domain.is_ZZ:
        f = f.set_domain('QQ')
    else:
        return []

    lead_divs = divisors(int(f.LC()))
    trail_divs = divisors(int(f.EC()))

    # zero is a root iff the polynomial vanishes at it
    zeros = [S.Zero] if not f.eval(S.Zero) else []

    # Candidate roots are +/- ratios of divisors of the extreme
    # coefficients; keep each candidate that makes f vanish.
    for num in lead_divs:
        for den in trail_divs:
            candidate = Rational(num, den)

            if not f.eval(candidate):
                zeros.append(candidate)

            if not f.eval(-candidate):
                zeros.append(-candidate)

    return sorted(zeros, key=default_sort_key)
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.

    Returns the largest integer ``div`` such that every coefficient is
    divisible by ``div`` raised to the corresponding exponent (so the
    substitution ``x -> div*x`` keeps coefficients integral), or ``None``
    if no non-trivial scaling exists.
    """
    monoms, coeffs = zip(*poly.terms())
    monoms, = zip(*monoms)
    # list() so the result is indexable/reversible on Python 3, where
    # map() returns an iterator
    coeffs = list(map(abs, coeffs))

    # only worthwhile when the trailing coefficient dominates the leading one
    if coeffs[0] < coeffs[-1]:
        coeffs = list(reversed(coeffs))
    else:
        return None

    monoms = monoms[:-1]
    coeffs = coeffs[:-1]

    # candidate divisors of the coefficient gcd, largest first; skip 1
    divs = reversed(divisors(gcd_list(coeffs))[1:])

    try:
        # builtin next() instead of Python 2-only .next()
        div = next(divs)
    except StopIteration:
        return None

    while True:
        for monom, coeff in zip(monoms, coeffs):
            if coeff % div**monom != 0:
                # current candidate fails; try the next smaller divisor
                try:
                    div = next(divs)
                except StopIteration:
                    return None
                else:
                    break
        else:
            return div
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``.

    Returns ``(coeff, poly)`` such that the roots of the input polynomial
    are ``coeff`` times the roots of the returned polynomial.
    """
    coeff = S.One

    try:
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        return coeff, poly

    poly = poly.primitive()[1]
    poly = poly.retract()

    # Strip symbolic generators that enter all coefficients with a
    # uniform exponent ratio; their effect is a simple root rescaling.
    if poly.get_domain().is_Poly and all(c.is_monomial for c in poly.rep.coeffs()):
        poly = poly.inject()

        # list() so strips is indexable/sliceable on Python 3, where
        # zip() returns an iterator
        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])

        base, strips = strips[0], strips[1:]

        for gen, strip in zip(list(gens), strips):
            reverse = False

            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True

            ratio = None

            # the generator can be eliminated only if its exponents are a
            # constant multiple of the main generator's exponents
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a

                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                if reverse:
                    ratio = -ratio

                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)

        if gens:
            poly = poly.eject(*gens)

    # Over ZZ, rescale x -> basis*x to shrink integer coefficients.
    if poly.is_univariate and poly.get_domain().is_ZZ:
        basis = _integer_basis(poly)

        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis

    return coeff, poly
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned. To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots. However to get a tuple containing all
    those roots set the ``multiple`` flag to True.

    Examples
    ========

    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-y, x, y)
    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}

    """
    flags = dict(flags)

    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    quartics = flags.pop('quartics', True)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        if gens:
            raise ValueError('redundant generators given')

        x = Dummy('x')

        # build a dense coefficient mapping {degree: coeff}
        poly, i = {}, len(f) - 1

        for coeff in f:
            poly[i], i = sympify(coeff), i - 1

        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # accumulate multiplicities of repeated roots
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []

        for root in _try_heuristics(factors[0]):
            roots.append(root)

        # compose back: roots of g(h(x)) come from solving h(x) = r for
        # each root r of the outer factor
        for factor in factors[1:]:
            previous, roots = list(roots), []

            for root in previous:
                g = factor - Poly(root, f.gen)

                for root in _try_heuristics(g):
                    roots.append(root)

        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            return [S(0)]*f.degree()

        if f.length() == 2:
            if f.degree() == 1:
                # list() so a concrete list is returned on Python 3,
                # where map() yields an iterator
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)

        result = []

        # peel off an obvious root at -1 or +1 if present
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break

        n = f.degree()

        if n == 1:
            result += list(map(cancel, roots_linear(f)))
        elif n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f)
        elif n == 4 and quartics:
            result += roots_quartic(f)

        return result

    # factor out x**k and record zero as a root of multiplicity k
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {S(0): k}

    coeff, f = preprocess_roots(f)

    if auto and f.get_domain().has_Ring:
        f = f.to_field()

    result = {}

    if not f.is_ground:
        if not f.get_domain().is_Exact:
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.degree() == 2:
            for r in roots_quadratic(f):
                _update_dict(result, r, 1)
        elif f.length() == 2:
            for r in roots_binomial(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()

            if len(factors) == 1 and factors[0][1] == 1:
                for root in _try_decompose(f):
                    _update_dict(result, root, 1)
            else:
                for factor, k in factors:
                    for r in _try_heuristics(Poly(factor, f.gen, field=True)):
                        _update_dict(result, r, k)

    # undo the rescaling introduced by preprocess_roots
    if coeff is not S.One:
        _result, result = result, {}

        # .items() instead of Python 2-only .iteritems()
        for root, k in _result.items():
            result[coeff*root] = k

    result.update(zeros)

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)

        # iterate over a snapshot of the keys so deletion is safe
        for zero in list(result):
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in list(result):
            if not predicate(zero):
                del result[zero]

    if not multiple:
        return result
    else:
        zeros = []

        for zero, k in result.items():
            zeros.extend([zero]*k)

        return sorted(zeros, key=default_sort_key)
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]

    """
    args = dict(args)
    filter = args.pop('filter', None)

    F = Poly(f, *gens, **args)

    if not F.is_Poly:
        return [f]

    if F.is_multivariate:
        raise ValueError('multivariate polynomials not supported')

    x = F.gens[0]
    zeros = roots(F, filter=filter)

    if not zeros:
        factors = [F]
    else:
        factors, N = [], 0

        # one linear factor (x - r) per root, repeated per multiplicity;
        # .items() instead of Python 2-only .iteritems()
        for r, n in zeros.items():
            factors, N = factors + [Poly(x - r, x)]*n, N + n

        if N < F.degree():
            # roots did not account for the full degree: append the
            # remaining cofactor
            G = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(G))

    # mirror the input type: expressions in, expressions out
    if not isinstance(f, Poly):
        factors = [f.as_expr() for f in factors]

    return sorted(factors, key=default_sort_key)
| {
"repo_name": "srjoglekar246/sympy",
"path": "sympy/polys/polyroots.py",
"copies": "3",
"size": "18233",
"license": "bsd-3-clause",
"hash": -5527631685905393000,
"line_mean": 24.9729344729,
"line_max": 91,
"alpha_frac": 0.5080897274,
"autogenerated": false,
"ratio": 3.385886722376973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014923852852553835,
"num_lines": 702
} |
"""Algorithms for computing symbolic roots of polynomials. """
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core import S, I, pi
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt, re, im, Abs, cos, sin
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded, DomainError
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.rationaltools import together
from sympy.simplify import simplify, powsimp
from sympy.utilities import default_sort_key
from sympy.core.compatibility import reduce
import math
def roots_linear(f):
    """Returns a list of roots of a linear polynomial."""
    # a*x + b  ->  -b/a
    root = -f.nth(0)/f.nth(1)
    dom = f.get_domain()

    if not dom.is_Numerical:
        # tidy symbolic results: factor over composite domains,
        # simplify otherwise
        root = factor(root) if dom.is_Composite else simplify(root)

    return [root]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial."""
    a, b, c = f.all_coeffs()
    dom = f.get_domain()

    def _simplify(expr):
        # factor over composite domains, simplify otherwise
        return factor(expr) if dom.is_Composite else simplify(expr)

    if c is S.Zero:
        # x*(a*x + b): roots are 0 and -b/a
        x1, x2 = S.Zero, -b/a

        if not dom.is_Numerical:
            x2 = _simplify(x2)
    elif b is S.Zero:
        # a*x**2 + c: roots are +/- sqrt(-c/a)
        rhs = -c/a

        if not dom.is_Numerical:
            w = sqrt(_simplify(rhs))
        else:
            w = sqrt(rhs)

        x1 = w
        x2 = -w
    else:
        # general case: quadratic formula
        disc = b**2 - 4*a*c

        if dom.is_Numerical:
            sd = sqrt(disc)

            x1 = (-b + sd) / (2*a)
            x2 = (-b - sd) / (2*a)
        else:
            sd = sqrt(_simplify(disc))
            twice_a = 2*a

            base = _simplify(-b/twice_a)
            offset = sd/twice_a

            x1 = base + offset
            x2 = base - offset

    return sorted([x1, x2], key=default_sort_key)
def roots_cubic(f):
    """Returns a list of roots of a cubic polynomial.

    Uses Cardano's method: the polynomial is made monic, depressed by
    the shift ``x -> x - a/3``, and the roots are recovered from one
    cube root ``u1`` and its rotations by the primitive cube roots of
    unity.
    """
    _, a, b, c = f.monic().all_coeffs()
    if c is S.Zero:
        # zero constant term: x divides f; solve the remaining quadratic
        x1, x2 = roots([1, a, b], multiple=True)
        return [x1, S.Zero, x2]
    # depressed cubic y**3 + p*y + q with x = y - a/3
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27
    pon3 = p/3
    aon3 = a/3
    if p is S.Zero:
        if q is S.Zero:
            # p == q == 0: triple root at -a/3
            return [-aon3]*3
        else:
            u1 = q**Rational(1, 3)
    elif q is S.Zero:
        # q == 0: y*(y**2 + p) = 0
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    else:
        u1 = (q/2 + sqrt(q**2/4 + pon3**3))**Rational(1, 3)
    # rotate u1 by the primitive cube roots of unity
    coeff = S.ImaginaryUnit*sqrt(3)/2
    u2 = u1*(-S.Half + coeff)
    u3 = u1*(-S.Half - coeff)
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]
    return soln
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.

    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.

    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.

    Although there is a general solution, simpler results can be obtained for
    certain values of the coefficients. In all cases, 4 roots are returned:

        1) `f = c + a*(a**2/8 - b/2) == 0`
        2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
        3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
            a) `p == 0`
            b) `p != 0`

    Examples
    ========

    >>> from sympy import Poly, symbols, I
    >>> from sympy.polys.polyroots import roots_quartic

    >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))

    >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
    >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
    ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']

    References
    ==========

    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation

    """
    _, a, b, c, d = f.monic().all_coeffs()
    if not d:
        # zero constant term: x divides f; solve the remaining cubic
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # quasisymmetric case [6]: split into two quadratics
        x, m = f.gen, c/a
        g = Poly(x**2 + a*x + b - 2*m, x)
        z1, z2 = roots_quadratic(g)
        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)
        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)
        return r1 + r2
    else:
        # depressed quartic y**4 + e*y**2 + f*y + g with x = y - a/4
        a2 = a**2
        e = b - 3*a2/8
        f = c + a*(a2/8 - b/2)
        g = d - a*(a*(3*a2/256 - b/16) + c/4)
        aon4 = a/4
        ans = []
        if f is S.Zero:
            # biquadratic: solve the quadratic in y**2
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple=True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y divides the depressed quartic; solve the remaining cubic
            y = [S.Zero] + roots([1, 0, e, f], multiple=True)
            return [tmp - aon4 for tmp in y]
        else:
            # general case via the resolvent cubic
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)
            if p is S.Zero:
                y = -5*e/6 - q**TH
            else:
                # with p != 0 then u below is not 0
                root = sqrt(q**2/4 + p**3/27)
                r = -q/2 + root  # or -q/2 - root
                u = r**TH  # primary root of solve(x**3-r, x)
                y = -5*e/6 + u - p/u/3
            w = sqrt(e + 2*y)
            arg1 = 3*e + 2*y
            arg2 = 2*f/w
            # the four sign combinations give the four roots
            for s in [-1, 1]:
                root = sqrt(-(arg1 + s*arg2))
                for t in [-1, 1]:
                    ans.append((s*w - t*root)/2 - aon4)
        return ans
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial.

    For ``a*x**n + b`` the roots are ``alpha*zeta`` where ``alpha`` is the
    principal n-th root of ``-b/a`` and ``zeta`` ranges over the n-th
    roots of unity.
    """
    n = f.degree()

    a, b = f.nth(n), f.nth(0)
    # principal n-th root of -b/a
    alpha = (-cancel(b/a))**Rational(1, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    roots, I = [], S.ImaginaryUnit

    # range() instead of Python 2-only xrange() for 2/3 compatibility
    for k in range(n):
        zeta = exp(2*k*S.Pi*I/n).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))

    return sorted(roots, key=default_sort_key)
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    Examples
    ========

    >>> from sympy.polys.polyroots import _inv_totient_estimate

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)

    """
    # Only primes p with (p - 1) | m can divide any n with totient(n) == m.
    primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]
    a, b = 1, 1
    # a/b = product(p)/product(p - 1) over those primes gives a first bound
    for p in primes:
        a *= p
        b *= p - 1
    L = m
    U = int(math.ceil(m*(float(a)/b)))
    # Tighten U: accumulate the product P of successive primes until it
    # exceeds the current bound.
    P = p = 2
    primes = []
    while P <= U:
        p = nextprime(p)
        primes.append(p)
        P *= p
    # The loop overshoots by exactly one prime; remove it from the product.
    P //= p
    b = 1
    # Recompute the (p - 1) product without the last retained prime.
    for p in primes[:-1]:
        b *= p - 1
    U = int(math.ceil(m*(float(P)/b)))
    return L, U
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials.

    Locates ``n`` with ``f == cyclotomic_poly(n)`` via totient bounds,
    then returns either the primitive n-th roots of unity directly or,
    with ``factor=True``, the roots read off a factorization over the
    cyclotomic extension.
    """
    L, U = _inv_totient_estimate(f.degree())

    # range() instead of Python 2-only xrange() for 2/3 compatibility
    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)

        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")

    roots = []

    if not factor:
        # primitive n-th roots of unity: exp(2*pi*I*k/n) with gcd(k, n) == 1
        for k in range(1, n + 1):
            if igcd(k, n) == 1:
                roots.append(exp(2*k*S.Pi*I/n).expand(complex=True))
    else:
        # each linear factor h over the extension contributes root -h.TC()
        g = Poly(f, extension=(-1)**Rational(1, n))

        for h, _ in g.factor_list()[1]:
            roots.append(-h.TC())

    return sorted(roots, key=default_sort_key)
def roots_quintic(f):
    """
    Calculate exact roots of a solvable quintic.

    ``f`` must reduce to the form x**5 + p*x**3 + q*x**2 + r*x + s with
    rational coefficients, be irreducible, and have a resolvent ``f20``
    with a linear factor over Z; otherwise an empty list is returned so
    the caller can fall back to other methods.
    """
    result = []
    coeff_5, coeff_4, p, q, r, s = f.all_coeffs()

    # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
    if coeff_4:
        return result

    if coeff_5 != 1:
        # Normalize to a monic polynomial; bail out unless all scaled
        # coefficients stay rational.
        l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
        if not all(coeff.is_Rational for coeff in l):
            return result
        f = Poly(f/coeff_5)
    quintic = PolyQuintic(f)

    # Eqn standardised. Algo for solving starts here
    if not f.is_irreducible:
        return result

    f20 = quintic.f20
    # Check if f20 has linear factors over domain Z
    if f20.is_irreducible:
        return result

    # Now, we know that f is solvable
    for _factor in f20.factor_list()[1]:
        if _factor[0].is_linear:
            theta = _factor[0].root(0)
            break
    d = discriminant(f)
    delta = sqrt(d)
    # zeta = a fifth root of unity
    zeta1, zeta2, zeta3, zeta4 = quintic.zeta
    T = quintic.T(theta, d)
    tol = S(1e-10)
    alpha = T[1] + T[2]*delta
    alpha_bar = T[1] - T[2]*delta
    beta = T[3] + T[4]*delta
    beta_bar = T[3] - T[4]*delta

    disc = alpha**2 - 4*beta
    disc_bar = alpha_bar**2 - 4*beta_bar

    l0 = quintic.l0(theta)

    l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
    l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))

    l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
    l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))

    order = quintic.order(theta, d)
    test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
    # Comparing floats
    # Problems importing on top
    from sympy.utilities.randtest import comp
    if not comp(test, 0, tol):
        l2, l3 = l3, l2

    # Now we have correct order of l's
    R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
    R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
    R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
    R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4

    Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    sol = Symbol('sol')

    # Simplifying improves performance a lot for exact expressions
    R1 = _quintic_simplify(R1)
    R2 = _quintic_simplify(R2)
    R3 = _quintic_simplify(R3)
    R4 = _quintic_simplify(R4)

    # Solve imported here. Causing problems if imported as 'solve'
    # and hence the changed name
    from sympy.solvers.solvers import solve as _solve
    a, b = symbols('a b', cls=Dummy)
    # The five fifth roots of a + I*b, reused for each R via substitution.
    _sol = _solve( sol**5 - a - I*b, sol)
    for i in range(5):
        _sol[i] = factor(_sol[i])
    R1 = R1.as_real_imag()
    R2 = R2.as_real_imag()
    R3 = R3.as_real_imag()
    R4 = R4.as_real_imag()

    for i, root in enumerate(_sol):
        Res[1][i] = _quintic_simplify(root.subs({ a: R1[0], b: R1[1] }))
        Res[2][i] = _quintic_simplify(root.subs({ a: R2[0], b: R2[1] }))
        Res[3][i] = _quintic_simplify(root.subs({ a: R3[0], b: R3[1] }))
        Res[4][i] = _quintic_simplify(root.subs({ a: R4[0], b: R4[1] }))

    for i in range(1, 5):
        for j in range(5):
            Res_n[i][j] = Res[i][j].n()
            Res[i][j] = _quintic_simplify(Res[i][j])
    r1 = Res[1][0]
    r1_n = Res_n[1][0]

    # Pick r4 so that r1*r4 is (numerically) real.
    for i in range(5):
        if comp(im(r1_n*Res_n[4][i]), 0, tol):
            r4 = Res[4][i]
            break

    # Now we have various Res values. Each will be a list of five
    # values. We have to pick one r value from those five for each Res.
    # NOTE: the original code computed quintic.uv(theta, d) twice and
    # also bound an unused sqrt5 = math.sqrt(5); both removed.
    u, v = quintic.uv(theta, d)
    testplus = (u + v*delta*sqrt(5)).n()
    testminus = (u - v*delta*sqrt(5)).n()

    # Evaluated numbers suffixed with _n
    # We will use evaluated numbers for calculation. Much faster.
    r4_n = r4.n()
    r2 = r3 = None

    for i in range(5):
        r2temp_n = Res_n[2][i]
        for j in range(5):
            # Again storing away the exact number and using
            # evaluated numbers in computations
            r3temp_n = Res_n[3][j]
            if( comp( r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus, 0, tol) and
                comp( r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus, 0, tol ) ):
                r2 = Res[2][i]
                r3 = Res[3][j]
                break
        if r2:
            break

    # Now, we have r's so we can get roots
    x1 = (r1 + r2 + r3 + r4)/5
    x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
    x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
    x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
    x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
    result = [x1, x2, x3, x4, x5]

    # Now check if solutions are distinct
    result_n = []
    for root in result:
        result_n.append(root.n(5))
    result_n = sorted(result_n)

    prev_entry = None
    for r in result_n:
        if r == prev_entry:
            # Roots are identical. Abort. Return []
            # and fall back to usual solve
            return []
        prev_entry = r

    return result
def _quintic_simplify(expr):
    """Canonicalize a quintic intermediate: powsimp, cancel, then combine
    into a single fraction with ``together``."""
    return together(cancel(powsimp(expr)))
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers. """
    # Split terms into exponent / |coefficient| columns (terms are
    # ((exp,), coeff) pairs for a univariate poly).
    terms = poly.terms()
    exponents = [monom[0] for monom, _ in terms]
    magnitudes = [abs(coeff) for _, coeff in terms]

    # Only worth scaling when the trailing coefficient dominates the
    # leading one; otherwise give up immediately.
    if magnitudes[0] >= magnitudes[-1]:
        return None

    magnitudes = list(reversed(magnitudes))

    # Drop the last entry of each column, exactly as the original pairing
    # did after its reversal.
    exponents = exponents[:-1]
    magnitudes = magnitudes[:-1]

    # Try divisors of the gcd from largest to smallest; the first one
    # with coeff divisible by div**monom for every pair is the basis.
    candidates = reversed(divisors(gcd_list(magnitudes))[1:])

    for div in candidates:
        if all(coeff % div**monom == 0
               for monom, coeff in zip(exponents, magnitudes)):
            return div

    return None
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``.

    Returns ``(coeff, poly)``; callers (see ``roots``) recover roots of
    the original polynomial by multiplying roots of the returned
    polynomial by ``coeff``.
    """
    coeff = S.One

    try:
        # Work with integer (cleared-denominator) coefficients.
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        return coeff, poly

    poly = poly.primitive()[1]
    poly = poly.retract()

    # If the coefficients live in a polynomial ring and every coefficient
    # is a single monomial, try to eliminate the coefficient generators.
    if poly.get_domain().is_Poly and all(c.is_monomial for c in poly.rep.coeffs()):
        poly = poly.inject()

        # Columns of the exponent vectors: one strip per generator.
        strips = zip(*poly.monoms())
        gens = list(poly.gens[1:])

        # base: exponents of the main generator; strips: the others.
        base, strips = strips[0], strips[1:]

        for gen, strip in zip(list(gens), strips):
            reverse = False

            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True

            ratio = None

            # gen can be scaled away only if its exponents are a constant
            # integer multiple of the main generator's exponents.
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a

                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                # Consistent ratio found: substitute gen -> 1 and fold
                # gen**(-ratio) into the root-scaling coefficient.
                if reverse:
                    ratio = -ratio

                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)

        if gens:
            poly = poly.eject(*gens)

    # Over ZZ, additionally try to shrink integer coefficients by a
    # common basis (see _integer_basis).
    if poly.is_univariate and poly.get_domain().is_ZZ:
        basis = _integer_basis(poly)

        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                # Scale each coefficient down by basis**(n - degree of term).
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis

    return coeff, poly
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned.  To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots.  However to get a tuple containing all
    those roots set the ``multiple`` flag to True.

    Examples
    ========

    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-y, x, y)

    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}

    """
    flags = dict(flags)

    # Pop the solver flags; whatever remains is forwarded to Poly().
    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    quartics = flags.pop('quartics', True)
    quintics = flags.pop('quintics', False)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        # A list of coefficients (highest degree first): build a Poly in
        # a fresh dummy generator.
        if gens:
            raise ValueError('redundant generators given')

        x = Dummy('x')

        poly, i = {}, len(f) - 1

        for coeff in f:
            poly[i], i = sympify(coeff), i - 1

        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # Accumulate multiplicity k for root in result.
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []

        for root in _try_heuristics(factors[0]):
            roots.append(root)

        # Peel off one composition level at a time: roots of the outer
        # part become constant terms for the next inner factor.
        for factor in factors[1:]:
            previous, roots = list(roots), []

            for root in previous:
                g = factor - Poly(root, f.gen)

                for root in _try_heuristics(g):
                    roots.append(root)

        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            # c*x**n: the only root is 0, with multiplicity n.
            return [S(0)]*f.degree()

        if f.length() == 2:
            if f.degree() == 1:
                return map(cancel, roots_linear(f))
            else:
                return roots_binomial(f)

        result = []

        # Cheap check for the rational roots +-1 before using formulas.
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break

        n = f.degree()

        # Dispatch on degree (cubic/quartic/quintic formulas are gated by
        # the corresponding flags captured from the enclosing scope).
        if n == 1:
            result += map(cancel, roots_linear(f))
        elif n == 2:
            result += map(cancel, roots_quadratic(f))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f)
        elif n == 4 and quartics:
            result += roots_quartic(f)
        elif n == 5 and quintics:
            result += roots_quintic(f)

        return result

    # Factor out x**k: contributes the root 0 with multiplicity k.
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {S(0): k}

    # Strip symbolic content; roots of the original are coeff*roots(f).
    coeff, f = preprocess_roots(f)

    if auto and f.get_domain().has_Ring:
        f = f.to_field()

    result = {}

    if not f.is_ground:
        if not f.get_domain().is_Exact:
            # Inexact (e.g. floating-point) coefficients: go numeric.
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.degree() == 2:
            for r in roots_quadratic(f):
                _update_dict(result, r, 1)
        elif f.length() == 2:
            for r in roots_binomial(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()

            if len(factors) == 1 and factors[0][1] == 1:
                # Irreducible (over the rationals): try decomposition.
                for root in _try_decompose(f):
                    _update_dict(result, root, 1)
            else:
                # Handle each irreducible factor separately, weighting
                # roots by the factor's multiplicity k.
                for factor, k in factors:
                    for r in _try_heuristics(Poly(factor, f.gen, field=True)):
                        _update_dict(result, r, k)

    # Undo the preprocessing scale factor.
    if coeff is not S.One:
        _result, result, = result, {}

        for root, k in _result.iteritems():
            result[coeff*root] = k

    result.update(zeros)

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)

        # Iterate over a copy so deletion is safe.
        for zero in dict(result).iterkeys():
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in dict(result).iterkeys():
            if not predicate(zero):
                del result[zero]

    if not multiple:
        return result
    else:
        # Flatten the multiplicity dict into a sorted list of roots.
        zeros = []

        for zero, k in result.iteritems():
            zeros.extend([zero]*k)

        return sorted(zeros, key=default_sort_key)
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]

    """
    options = dict(args)
    filter = options.pop('filter', None)

    F = Poly(f, *gens, **options)

    if not F.is_Poly:
        return [f]

    if F.is_multivariate:
        raise ValueError('multivariate polynomials not supported')

    gen = F.gens[0]
    zeros = roots(F, filter=filter)

    if not zeros:
        factors = [F]
    else:
        factors = []
        covered = 0

        # One linear factor per root, repeated by multiplicity.
        for zero, multiplicity in zeros.iteritems():
            factors.extend([Poly(gen - zero, gen)]*multiplicity)
            covered += multiplicity

        if covered < F.degree():
            # Append the cofactor left after dividing out all known
            # linear factors.
            G = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(G))

    if not isinstance(f, Poly):
        factors = [ h.as_expr() for h in factors ]

    return sorted(factors, key=default_sort_key)
| {
"repo_name": "amitjamadagni/sympy",
"path": "sympy/polys/polyroots.py",
"copies": "1",
"size": "23045",
"license": "bsd-3-clause",
"hash": -3459258910794162000,
"line_mean": 25.921728972,
"line_max": 91,
"alpha_frac": 0.5182035149,
"autogenerated": false,
"ratio": 3.186091524955067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4204295039855067,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.